after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def _hook_get_(hook_self, torch_type):
def get_(self, reduce=lambda x: x[0]):
"""
Gets a Torch object from its current owners.
Args:
reduce: (EXPERIMENTAL) How to reduce tensors that come from
multiple workers
"""
# TODO: fully generalize this to multiple workers; consider
# adding arguments for other tensor ids, e.g. mapping workers
# to tensors, and a reduce function (for example, would allow
# for built-in gradient averaging when Variable.get is done)
# (low priority)
try:
assert len(self.owners) == 1
except AssertionError:
raise NotImplementedError(
"Only able to get_ tensors belonging \
to a single worker right now."
)
if hook_self.local_worker.id in self.owners:
return self
_out = hook_self.local_worker.request_obj(
obj_id=self.id, recipient=self.owners[0]
)
x, request_obj_cleanup_method = _out
hook_self.local_worker.register_object(hook_self.local_worker, x, id=x.id)
# if self == tensor
_id = hook_self.local_worker.id # for brevity
if (
type(self) != torch.autograd.variable.Variable
and type(self) != torch.nn.parameter.Parameter
):
_os = self.old_set_(x.type(self.type()))
self = hook_self.local_worker.register_object(
hook_self.local_worker, _os, id=self.id, owners=[_id]
)
else:
_os = self.old_set_(x.type(self.data.type())) # for brevity
self = hook_self.local_worker.register_object(
hook_self.local_worker, _os, id=self.id, owners=[_id]
)
self.data = hook_self.local_worker.register_object(
hook_self.local_worker, x.data, id=x.data.id, owners=[_id]
)
if x.grad is not None:
self.grad = hook_self.local_worker.register_object(
hook_self.local_worker, x.grad, id=x.grad.id, owners=[_id]
)
"""for some reason, when retuning obj from request_obj
method (above), the gradient gets re-initialized without
being re-registered and as a consequence does not have an
id, causing the last register_object above to fail
because x.grad.id does not exist. As a result, we've needed
to register objects temporarily which seems to
fix it. Super strange bug which took multiple days to figure
out. The true cause is still unknown but this
workaround seems to work well for now. Anyway, we don't need
the temporary objects past this point.
request_obj_cleanup_method()"""
return self
setattr(torch_type, "get_", get_)
# TODO: make this a non-inline version
setattr(torch_type, "get", get_)
|
def _hook_get_(hook_self, torch_type):
def get_(self, reduce=lambda x: x[0]):
"""
Gets a Torch object from its current owners.
Args:
reduce: (EXPERIMENTAL) How to reduce tensors that come from
multiple workers
"""
# TODO: fully generalize this to multiple workers; consider
# adding arguments for other tensor ids, e.g. mapping workers
# to tensors, and a reduce function (for example, would allow
# for built-in gradient averaging when Variable.get is done)
# (low priority)
try:
assert len(self.owners) == 1
except AssertionError:
raise NotImplementedError(
"Only able to get_ tensors belonging \
to a single worker right now."
)
if hook_self.local_worker.id in self.owners:
return self
_out = hook_self.local_worker.request_obj(
obj_id=self.id, recipient=self.owners[0]
)
x, request_obj_cleanup_method = _out
hook_self.local_worker.register_object(hook_self.local_worker, x, id=x.id)
# if self == tensor
_id = hook_self.local_worker.id # for brevity
if type(self) != torch.autograd.variable.Variable:
_os = self.old_set_(x.type(self.type()))
self = hook_self.local_worker.register_object(
hook_self.local_worker, _os, id=self.id, owners=[_id]
)
else:
_os = self.old_set_(x.type(self.data.type())) # for brevity
self = hook_self.local_worker.register_object(
hook_self.local_worker, _os, id=self.id, owners=[_id]
)
self.data = hook_self.local_worker.register_object(
hook_self.local_worker, x.data, id=x.data.id, owners=[_id]
)
if x.grad is not None:
self.grad = hook_self.local_worker.register_object(
hook_self.local_worker, x.grad, id=x.grad.id, owners=[_id]
)
"""for some reason, when retuning obj from request_obj
method (above), the gradient gets re-initialized without
being re-registered and as a consequence does not have an
id, causing the last register_object above to fail
because x.grad.id does not exist. As a result, we've needed
to register objects temporarily which seems to
fix it. Super strange bug which took multiple days to figure
out. The true cause is still unknown but this
workaround seems to work well for now. Anyway, we don't need
the temporary objects past this point.
request_obj_cleanup_method()"""
return self
setattr(torch_type, "get_", get_)
# TODO: make this a non-inline version
setattr(torch_type, "get", get_)
|
https://github.com/OpenMined/PySyft/issues/1350
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-14-4207de4e45fd> in <module>()
10 loss.backward()
11
---> 12 model.send_(remote)
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in send_(self, workers)
907 owners=self.owners, is_pointer=True)
908
--> 909 return hook_self._var_to_pointer(self, hook_self)
910
911 setattr(torch.autograd.variable.Variable, 'send_', send_)
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in _var_to_pointer(self, var, hook_self)
982 print("_var_to_pointer:" + str(var.data.id))
983 if var.grad is not None:
--> 984 self._var_to_pointer(var.grad, hook_self)
985
986 var.data.old_set_(var.data.__class__(0))
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in _var_to_pointer(self, var, hook_self)
989 id=var.data.id,
990 owners=var.owners,
--> 991 is_pointer=True)
992 return var
993
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/workers.py in register_object(self, worker, obj, force_attach_to_worker, temporary, **kwargs)
508 raise RuntimeError(
509 'Invalid registry: is_pointer is {} but owners is {} on tensor {}'.format(
--> 510 obj.is_pointer, obj.owners, obj.id))
511 # print("setting object:" + str(obj.id))
512 self.set_obj(obj.id, obj, force=force_attach_to_worker, tmp=temporary)
RuntimeError: Invalid registry: is_pointer is True but owners is [0] on tensor 2841986768
|
RuntimeError
|
def get_(self, reduce=lambda x: x[0]):
"""
Gets a Torch object from its current owners.
Args:
reduce: (EXPERIMENTAL) How to reduce tensors that come from
multiple workers
"""
# TODO: fully generalize this to multiple workers; consider
# adding arguments for other tensor ids, e.g. mapping workers
# to tensors, and a reduce function (for example, would allow
# for built-in gradient averaging when Variable.get is done)
# (low priority)
try:
assert len(self.owners) == 1
except AssertionError:
raise NotImplementedError(
"Only able to get_ tensors belonging \
to a single worker right now."
)
if hook_self.local_worker.id in self.owners:
return self
_out = hook_self.local_worker.request_obj(obj_id=self.id, recipient=self.owners[0])
x, request_obj_cleanup_method = _out
hook_self.local_worker.register_object(hook_self.local_worker, x, id=x.id)
# if self == tensor
_id = hook_self.local_worker.id # for brevity
if (
type(self) != torch.autograd.variable.Variable
and type(self) != torch.nn.parameter.Parameter
):
_os = self.old_set_(x.type(self.type()))
self = hook_self.local_worker.register_object(
hook_self.local_worker, _os, id=self.id, owners=[_id]
)
else:
_os = self.old_set_(x.type(self.data.type())) # for brevity
self = hook_self.local_worker.register_object(
hook_self.local_worker, _os, id=self.id, owners=[_id]
)
self.data = hook_self.local_worker.register_object(
hook_self.local_worker, x.data, id=x.data.id, owners=[_id]
)
if x.grad is not None:
self.grad = hook_self.local_worker.register_object(
hook_self.local_worker, x.grad, id=x.grad.id, owners=[_id]
)
"""for some reason, when retuning obj from request_obj
method (above), the gradient gets re-initialized without
being re-registered and as a consequence does not have an
id, causing the last register_object above to fail
because x.grad.id does not exist. As a result, we've needed
to register objects temporarily which seems to
fix it. Super strange bug which took multiple days to figure
out. The true cause is still unknown but this
workaround seems to work well for now. Anyway, we don't need
the temporary objects past this point.
request_obj_cleanup_method()"""
return self
|
def get_(self, reduce=lambda x: x[0]):
"""
Gets a Torch object from its current owners.
Args:
reduce: (EXPERIMENTAL) How to reduce tensors that come from
multiple workers
"""
# TODO: fully generalize this to multiple workers; consider
# adding arguments for other tensor ids, e.g. mapping workers
# to tensors, and a reduce function (for example, would allow
# for built-in gradient averaging when Variable.get is done)
# (low priority)
try:
assert len(self.owners) == 1
except AssertionError:
raise NotImplementedError(
"Only able to get_ tensors belonging \
to a single worker right now."
)
if hook_self.local_worker.id in self.owners:
return self
_out = hook_self.local_worker.request_obj(obj_id=self.id, recipient=self.owners[0])
x, request_obj_cleanup_method = _out
hook_self.local_worker.register_object(hook_self.local_worker, x, id=x.id)
# if self == tensor
_id = hook_self.local_worker.id # for brevity
if type(self) != torch.autograd.variable.Variable:
_os = self.old_set_(x.type(self.type()))
self = hook_self.local_worker.register_object(
hook_self.local_worker, _os, id=self.id, owners=[_id]
)
else:
_os = self.old_set_(x.type(self.data.type())) # for brevity
self = hook_self.local_worker.register_object(
hook_self.local_worker, _os, id=self.id, owners=[_id]
)
self.data = hook_self.local_worker.register_object(
hook_self.local_worker, x.data, id=x.data.id, owners=[_id]
)
if x.grad is not None:
self.grad = hook_self.local_worker.register_object(
hook_self.local_worker, x.grad, id=x.grad.id, owners=[_id]
)
"""for some reason, when retuning obj from request_obj
method (above), the gradient gets re-initialized without
being re-registered and as a consequence does not have an
id, causing the last register_object above to fail
because x.grad.id does not exist. As a result, we've needed
to register objects temporarily which seems to
fix it. Super strange bug which took multiple days to figure
out. The true cause is still unknown but this
workaround seems to work well for now. Anyway, we don't need
the temporary objects past this point.
request_obj_cleanup_method()"""
return self
|
https://github.com/OpenMined/PySyft/issues/1350
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-14-4207de4e45fd> in <module>()
10 loss.backward()
11
---> 12 model.send_(remote)
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in send_(self, workers)
907 owners=self.owners, is_pointer=True)
908
--> 909 return hook_self._var_to_pointer(self, hook_self)
910
911 setattr(torch.autograd.variable.Variable, 'send_', send_)
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in _var_to_pointer(self, var, hook_self)
982 print("_var_to_pointer:" + str(var.data.id))
983 if var.grad is not None:
--> 984 self._var_to_pointer(var.grad, hook_self)
985
986 var.data.old_set_(var.data.__class__(0))
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in _var_to_pointer(self, var, hook_self)
989 id=var.data.id,
990 owners=var.owners,
--> 991 is_pointer=True)
992 return var
993
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/workers.py in register_object(self, worker, obj, force_attach_to_worker, temporary, **kwargs)
508 raise RuntimeError(
509 'Invalid registry: is_pointer is {} but owners is {} on tensor {}'.format(
--> 510 obj.is_pointer, obj.owners, obj.id))
511 # print("setting object:" + str(obj.id))
512 self.set_obj(obj.id, obj, force=force_attach_to_worker, tmp=temporary)
RuntimeError: Invalid registry: is_pointer is True but owners is [0] on tensor 2841986768
|
RuntimeError
|
def _hook_variable(self):
# Overload 'special' methods here
self._hook_var___new__()
self._hook_var_contents()
self._hook_var_owners()
for attr in dir(torch.autograd.variable.Variable):
# Conditions for inclusion/exclusion
if attr in self.exclude + self.var_exclude:
continue
lit = getattr(torch.autograd.variable.Variable, attr)
is_base = attr in dir(object)
is_desc = inspect.ismethoddescriptor(lit)
# is_func = isinstance(type(lit), types.FunctionType)
is_func = isinstance(lit, types.FunctionType)
try:
is_service_func = "HookService" in lit.__qualname__
except:
is_service_func = False
is_old = re.match("old*", attr) is not None
# Where the overloading happens
if (
(is_desc or (is_func and not is_service_func))
and not is_base
and not is_old
):
passer = self._pass_method_args(lit)
new_attr = self._overload_method(passer)
setattr(torch.autograd.variable.Variable, "old_{}".format(attr), lit)
setattr(torch.autograd.variable.Variable, attr, new_attr)
self._hook_var_send_()
self._hook_get_(torch.autograd.variable.Variable)
self._hook_var_ser()
|
def _hook_variable(self):
# Overload 'special' methods here
self._hook_var___new__()
self._hook_var_contents()
for attr in dir(torch.autograd.variable.Variable):
# Conditions for inclusion/exclusion
if attr in self.exclude + self.var_exclude:
continue
lit = getattr(torch.autograd.variable.Variable, attr)
is_base = attr in dir(object)
is_desc = inspect.ismethoddescriptor(lit)
# is_func = isinstance(type(lit), types.FunctionType)
is_func = isinstance(lit, types.FunctionType)
try:
is_service_func = "HookService" in lit.__qualname__
except:
is_service_func = False
is_old = re.match("old*", attr) is not None
# Where the overloading happens
if (
(is_desc or (is_func and not is_service_func))
and not is_base
and not is_old
):
passer = self._pass_method_args(lit)
new_attr = self._overload_method(passer)
setattr(torch.autograd.variable.Variable, "old_{}".format(attr), lit)
setattr(torch.autograd.variable.Variable, attr, new_attr)
self._hook_var_send_()
self._hook_get_(torch.autograd.variable.Variable)
self._hook_var_ser()
|
https://github.com/OpenMined/PySyft/issues/1350
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-14-4207de4e45fd> in <module>()
10 loss.backward()
11
---> 12 model.send_(remote)
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in send_(self, workers)
907 owners=self.owners, is_pointer=True)
908
--> 909 return hook_self._var_to_pointer(self, hook_self)
910
911 setattr(torch.autograd.variable.Variable, 'send_', send_)
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in _var_to_pointer(self, var, hook_self)
982 print("_var_to_pointer:" + str(var.data.id))
983 if var.grad is not None:
--> 984 self._var_to_pointer(var.grad, hook_self)
985
986 var.data.old_set_(var.data.__class__(0))
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in _var_to_pointer(self, var, hook_self)
989 id=var.data.id,
990 owners=var.owners,
--> 991 is_pointer=True)
992 return var
993
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/workers.py in register_object(self, worker, obj, force_attach_to_worker, temporary, **kwargs)
508 raise RuntimeError(
509 'Invalid registry: is_pointer is {} but owners is {} on tensor {}'.format(
--> 510 obj.is_pointer, obj.owners, obj.id))
511 # print("setting object:" + str(obj.id))
512 self.set_obj(obj.id, obj, force=force_attach_to_worker, tmp=temporary)
RuntimeError: Invalid registry: is_pointer is True but owners is [0] on tensor 2841986768
|
RuntimeError
|
def _hook_new_grad(hook_self):
@property
def new_grad(self):
if not hasattr(self, "grad_registered"):
if self.old_grad is not None:
if hasattr(self.old_grad, "id"):
grad_id = self.old_grad.id
else:
grad_id = None
# this seems a little sketch - why are we having to check to see whether
# the parent has been registered. Is there and edge case where gradients
# are created before their parents? TODO: fix
if not hasattr(self, "owners"):
hook_self.local_worker.register_object(
hook_self.local_worker,
obj=self,
owners=[hook_self.local_worker.id],
is_pointer=False,
)
_ip = self.is_pointer
self.old_grad = hook_self.local_worker.register_object(
hook_self.local_worker,
obj=self.old_grad,
owners=self.owners,
id=grad_id,
is_pointer=_ip,
)
self.grad_registered = True
# DO NOT REMOVE THIS LINE UNLESS YOU KNOW WHAT YOU'RE DOING
# for context behidn this edit you can see the following video
# https://www.twitch.tv/videos/275838386
# long story short, we need to actually run the grad generating
# function (self.old_grad) and cache its value (the variable's
# gradient) in self.grad_backup so that python garbage collection
# doesn't delete the python object as a part of PyTorch's C++
# wrapping craziness (which does a lot of re-instantiating objects)
# In this case, re-instantiating the object gives it a new id because
# the object containing the old id goes away... this id is random which
# can create problems for PySyft
# also - keep this running ONLY within the if statement above that checks
# to see if self.grad_registered is not yet an attribute
self.grad_backup = self.old_grad
self.grad_backup.owners_backup = self.grad_backup.owners
return self.old_grad
@new_grad.setter
def new_grad(self, new):
self.old_grad = new
torch.autograd.variable.Variable.grad = new_grad
|
def _hook_new_grad(hook_self):
@property
def new_grad(self):
if not hasattr(self, "grad_registered"):
if self.old_grad is not None:
if hasattr(self.old_grad, "id"):
grad_id = self.old_grad.id
else:
grad_id = None
if not hasattr(self, "owners"):
hook_self.local_worker.register_object(
hook_self.local_worker,
obj=self,
owners=[hook_self.local_worker.id],
is_pointer=False,
)
_ip = self.is_pointer
self.old_grad = hook_self.local_worker.register_object(
hook_self.local_worker,
obj=self.old_grad,
owners=self.owners,
id=grad_id,
is_pointer=_ip,
)
self.grad_registered = True
return self.old_grad
@new_grad.setter
def new_grad(self, new):
self.old_grad = new
torch.autograd.variable.Variable.grad = new_grad
|
https://github.com/OpenMined/PySyft/issues/1350
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-14-4207de4e45fd> in <module>()
10 loss.backward()
11
---> 12 model.send_(remote)
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in send_(self, workers)
907 owners=self.owners, is_pointer=True)
908
--> 909 return hook_self._var_to_pointer(self, hook_self)
910
911 setattr(torch.autograd.variable.Variable, 'send_', send_)
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in _var_to_pointer(self, var, hook_self)
982 print("_var_to_pointer:" + str(var.data.id))
983 if var.grad is not None:
--> 984 self._var_to_pointer(var.grad, hook_self)
985
986 var.data.old_set_(var.data.__class__(0))
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in _var_to_pointer(self, var, hook_self)
989 id=var.data.id,
990 owners=var.owners,
--> 991 is_pointer=True)
992 return var
993
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/workers.py in register_object(self, worker, obj, force_attach_to_worker, temporary, **kwargs)
508 raise RuntimeError(
509 'Invalid registry: is_pointer is {} but owners is {} on tensor {}'.format(
--> 510 obj.is_pointer, obj.owners, obj.id))
511 # print("setting object:" + str(obj.id))
512 self.set_obj(obj.id, obj, force=force_attach_to_worker, tmp=temporary)
RuntimeError: Invalid registry: is_pointer is True but owners is [0] on tensor 2841986768
|
RuntimeError
|
def _hook_var_send_(hook_self):
def send_(self, workers):
"""
Sends a Variable object to a (sequence of) Grid workers.
Args:
workers: string (or sequence) containing IPFS address(es)
of worker node(s).
"""
# makes singleton if needed
workers = hook_self.local_worker._check_workers(self, workers)
# NEW OWNERS: this re-registers the current variable to have new owners!
# After this line, self.owners should point to workers (the input variable)
self = hook_self.local_worker.register_object(
hook_self.local_worker, obj=self, id=self.id, owners=workers
)
for worker in workers:
# TODO: sync or async? likely won't be worth doing async,
# but should check (low priority)
hook_self.local_worker.send_obj(self, worker)
# NEW IS_POINTER STATUS. This line changes the is_pointer flag to true.
hook_self.local_worker.register_object(
hook_self.local_worker,
obj=self,
id=self.id,
owners=self.owners,
is_pointer=True,
)
return hook_self._var_to_pointer(self, hook_self)
setattr(torch.autograd.variable.Variable, "send_", send_)
|
def _hook_var_send_(hook_self):
def send_(self, workers):
"""
Sends a Variable object to a (sequence of) Grid workers.
Args:
workers: string (or sequence) containing IPFS address(es)
of worker node(s).
"""
# makes singleton if needed
workers = hook_self.local_worker._check_workers(self, workers)
self = hook_self.local_worker.register_object(
hook_self.local_worker, obj=self, id=self.id, owners=workers
)
for worker in workers:
# TODO: sync or async? likely won't be worth doing async,
# but should check (low priority)
hook_self.local_worker.send_obj(self, worker)
hook_self.local_worker.register_object(
hook_self.local_worker,
obj=self,
id=self.id,
owners=self.owners,
is_pointer=True,
)
return hook_self._var_to_pointer(self, hook_self)
setattr(torch.autograd.variable.Variable, "send_", send_)
|
https://github.com/OpenMined/PySyft/issues/1350
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-14-4207de4e45fd> in <module>()
10 loss.backward()
11
---> 12 model.send_(remote)
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in send_(self, workers)
907 owners=self.owners, is_pointer=True)
908
--> 909 return hook_self._var_to_pointer(self, hook_self)
910
911 setattr(torch.autograd.variable.Variable, 'send_', send_)
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in _var_to_pointer(self, var, hook_self)
982 print("_var_to_pointer:" + str(var.data.id))
983 if var.grad is not None:
--> 984 self._var_to_pointer(var.grad, hook_self)
985
986 var.data.old_set_(var.data.__class__(0))
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in _var_to_pointer(self, var, hook_self)
989 id=var.data.id,
990 owners=var.owners,
--> 991 is_pointer=True)
992 return var
993
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/workers.py in register_object(self, worker, obj, force_attach_to_worker, temporary, **kwargs)
508 raise RuntimeError(
509 'Invalid registry: is_pointer is {} but owners is {} on tensor {}'.format(
--> 510 obj.is_pointer, obj.owners, obj.id))
511 # print("setting object:" + str(obj.id))
512 self.set_obj(obj.id, obj, force=force_attach_to_worker, tmp=temporary)
RuntimeError: Invalid registry: is_pointer is True but owners is [0] on tensor 2841986768
|
RuntimeError
|
def send_(self, workers):
"""
Sends a Variable object to a (sequence of) Grid workers.
Args:
workers: string (or sequence) containing IPFS address(es)
of worker node(s).
"""
# makes singleton if needed
workers = hook_self.local_worker._check_workers(self, workers)
# NEW OWNERS: this re-registers the current variable to have new owners!
# After this line, self.owners should point to workers (the input variable)
self = hook_self.local_worker.register_object(
hook_self.local_worker, obj=self, id=self.id, owners=workers
)
for worker in workers:
# TODO: sync or async? likely won't be worth doing async,
# but should check (low priority)
hook_self.local_worker.send_obj(self, worker)
# NEW IS_POINTER STATUS. This line changes the is_pointer flag to true.
hook_self.local_worker.register_object(
hook_self.local_worker,
obj=self,
id=self.id,
owners=self.owners,
is_pointer=True,
)
return hook_self._var_to_pointer(self, hook_self)
|
def send_(self, workers):
"""
Sends a Variable object to a (sequence of) Grid workers.
Args:
workers: string (or sequence) containing IPFS address(es)
of worker node(s).
"""
# makes singleton if needed
workers = hook_self.local_worker._check_workers(self, workers)
self = hook_self.local_worker.register_object(
hook_self.local_worker, obj=self, id=self.id, owners=workers
)
for worker in workers:
# TODO: sync or async? likely won't be worth doing async,
# but should check (low priority)
hook_self.local_worker.send_obj(self, worker)
hook_self.local_worker.register_object(
hook_self.local_worker,
obj=self,
id=self.id,
owners=self.owners,
is_pointer=True,
)
return hook_self._var_to_pointer(self, hook_self)
|
https://github.com/OpenMined/PySyft/issues/1350
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-14-4207de4e45fd> in <module>()
10 loss.backward()
11
---> 12 model.send_(remote)
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in send_(self, workers)
907 owners=self.owners, is_pointer=True)
908
--> 909 return hook_self._var_to_pointer(self, hook_self)
910
911 setattr(torch.autograd.variable.Variable, 'send_', send_)
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in _var_to_pointer(self, var, hook_self)
982 print("_var_to_pointer:" + str(var.data.id))
983 if var.grad is not None:
--> 984 self._var_to_pointer(var.grad, hook_self)
985
986 var.data.old_set_(var.data.__class__(0))
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in _var_to_pointer(self, var, hook_self)
989 id=var.data.id,
990 owners=var.owners,
--> 991 is_pointer=True)
992 return var
993
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/workers.py in register_object(self, worker, obj, force_attach_to_worker, temporary, **kwargs)
508 raise RuntimeError(
509 'Invalid registry: is_pointer is {} but owners is {} on tensor {}'.format(
--> 510 obj.is_pointer, obj.owners, obj.id))
511 # print("setting object:" + str(obj.id))
512 self.set_obj(obj.id, obj, force=force_attach_to_worker, tmp=temporary)
RuntimeError: Invalid registry: is_pointer is True but owners is [0] on tensor 2841986768
|
RuntimeError
|
def _hook_var_ser(hook_self):
def ser(self, include_data=True):
"""Serializes a variable into a JSON object"""
var_msg = {}
var_msg["torch_type"] = re.search("<class '(.*)'>", str(self.__class__)).group(
1
)
var_msg["requires_grad"] = self.requires_grad
var_msg["volatile"] = self.volatile
var_msg["data"] = self.data.ser(include_data)
if self.grad is not None:
var_msg["grad"] = self.grad.ser(include_data)
else:
var_msg["grad"] = None
var_msg["id"] = self.id
if type(self.owners[0]) is int:
var_msg["owners"] = self.owners
else:
var_msg["owners"] = list(map(lambda x: x.id, self.owners))
var_msg["is_pointer"] = not include_data
return json.dumps(var_msg)
def deser(self, obj_msg):
"""Deserializes a JSON object into a variable"""
if "data" in obj_msg.keys():
data_msg = json.loads(obj_msg["data"])
tensor_type = hook_self.types_guard(data_msg["torch_type"])
data_obj = tensor_type.deser(tensor_type, data_msg)
# data_obj = hook_self.build_tensor(data_msg, tensor_type)
data = hook_self.local_worker.handle_register(data_obj, data_msg)
if "grad" in obj_msg.keys():
if obj_msg["grad"] is not None:
grad_msg = json.loads(obj_msg["grad"])
var_type = hook_self.types_guard(grad_msg["torch_type"])
grad_obj = hook_self._build_var(grad_msg, var_type)
grad = hook_self.local_worker.handle_register(
grad_obj, grad_msg, force_attach_to_worker=False, temporary=True
)
else:
grad = None
# nn.parameter.Parameter does not accept "volatile" as an input param.
# https://pytorch.org/docs/0.3.1/autograd.html#variable
if self == torch.nn.parameter.Parameter:
var = self(data, requires_grad=obj_msg["requires_grad"])
else:
var = self(
data,
volatile=obj_msg["volatile"],
requires_grad=obj_msg["requires_grad"],
)
# var.grad = grad
if grad is not None:
setattr(var, "grad", grad)
else:
var.grad = None
# this returns grad because garbage collection seems to do something really strange
# if grad isn't returned here. It re-initializes the gradient somehow but in a way
# where it's not registered (which is bad)
return var
torch.autograd.variable.Variable.ser = ser
torch.autograd.variable.Variable.deser = deser
|
def _hook_var_ser(hook_self):
def ser(self, include_data=True):
var_msg = {}
var_msg["torch_type"] = re.search("<class '(.*)'>", str(self.__class__)).group(
1
)
var_msg["requires_grad"] = self.requires_grad
var_msg["volatile"] = self.volatile
var_msg["data"] = self.data.ser(include_data)
if self.grad is not None:
var_msg["grad"] = self.grad.ser(include_data)
else:
var_msg["grad"] = None
var_msg["id"] = self.id
if type(self.owners[0]) is int:
var_msg["owners"] = self.owners
else:
var_msg["owners"] = list(map(lambda x: x.id, self.owners))
var_msg["is_pointer"] = not include_data
return json.dumps(var_msg)
def deser(self, obj_msg):
if "data" in obj_msg.keys():
data_msg = json.loads(obj_msg["data"])
tensor_type = hook_self.types_guard(data_msg["torch_type"])
data_obj = tensor_type.deser(tensor_type, data_msg)
# data_obj = hook_self.build_tensor(data_msg, tensor_type)
data = hook_self.local_worker.handle_register(data_obj, data_msg)
if "grad" in obj_msg.keys():
if obj_msg["grad"] is not None:
grad_msg = json.loads(obj_msg["grad"])
var_type = hook_self.types_guard(grad_msg["torch_type"])
grad_obj = hook_self._build_var(grad_msg, var_type)
grad = hook_self.local_worker.handle_register(
grad_obj, grad_msg, force_attach_to_worker=False, temporary=True
)
else:
grad = None
var = self(
data, volatile=obj_msg["volatile"], requires_grad=obj_msg["requires_grad"]
)
# var.grad = grad
if grad is not None:
setattr(var, "grad", grad)
else:
var.grad = None
# this returns grad because garbage collection seems to do something really strange
# if grad isn't returned here. It re-initializes the gradient somehow but in a way
# where it's not registered (which is bad)
return var
torch.autograd.variable.Variable.ser = ser
torch.autograd.variable.Variable.deser = deser
|
https://github.com/OpenMined/PySyft/issues/1350
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-14-4207de4e45fd> in <module>()
10 loss.backward()
11
---> 12 model.send_(remote)
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in send_(self, workers)
907 owners=self.owners, is_pointer=True)
908
--> 909 return hook_self._var_to_pointer(self, hook_self)
910
911 setattr(torch.autograd.variable.Variable, 'send_', send_)
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in _var_to_pointer(self, var, hook_self)
982 print("_var_to_pointer:" + str(var.data.id))
983 if var.grad is not None:
--> 984 self._var_to_pointer(var.grad, hook_self)
985
986 var.data.old_set_(var.data.__class__(0))
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in _var_to_pointer(self, var, hook_self)
989 id=var.data.id,
990 owners=var.owners,
--> 991 is_pointer=True)
992 return var
993
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/workers.py in register_object(self, worker, obj, force_attach_to_worker, temporary, **kwargs)
508 raise RuntimeError(
509 'Invalid registry: is_pointer is {} but owners is {} on tensor {}'.format(
--> 510 obj.is_pointer, obj.owners, obj.id))
511 # print("setting object:" + str(obj.id))
512 self.set_obj(obj.id, obj, force=force_attach_to_worker, tmp=temporary)
RuntimeError: Invalid registry: is_pointer is True but owners is [0] on tensor 2841986768
|
RuntimeError
|
def ser(self, include_data=True):
"""Serializes a variable into a JSON object"""
var_msg = {}
var_msg["torch_type"] = re.search("<class '(.*)'>", str(self.__class__)).group(1)
var_msg["requires_grad"] = self.requires_grad
var_msg["volatile"] = self.volatile
var_msg["data"] = self.data.ser(include_data)
if self.grad is not None:
var_msg["grad"] = self.grad.ser(include_data)
else:
var_msg["grad"] = None
var_msg["id"] = self.id
if type(self.owners[0]) is int:
var_msg["owners"] = self.owners
else:
var_msg["owners"] = list(map(lambda x: x.id, self.owners))
var_msg["is_pointer"] = not include_data
return json.dumps(var_msg)
|
def ser(self, include_data=True):
var_msg = {}
var_msg["torch_type"] = re.search("<class '(.*)'>", str(self.__class__)).group(1)
var_msg["requires_grad"] = self.requires_grad
var_msg["volatile"] = self.volatile
var_msg["data"] = self.data.ser(include_data)
if self.grad is not None:
var_msg["grad"] = self.grad.ser(include_data)
else:
var_msg["grad"] = None
var_msg["id"] = self.id
if type(self.owners[0]) is int:
var_msg["owners"] = self.owners
else:
var_msg["owners"] = list(map(lambda x: x.id, self.owners))
var_msg["is_pointer"] = not include_data
return json.dumps(var_msg)
|
https://github.com/OpenMined/PySyft/issues/1350
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-14-4207de4e45fd> in <module>()
10 loss.backward()
11
---> 12 model.send_(remote)
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in send_(self, workers)
907 owners=self.owners, is_pointer=True)
908
--> 909 return hook_self._var_to_pointer(self, hook_self)
910
911 setattr(torch.autograd.variable.Variable, 'send_', send_)
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in _var_to_pointer(self, var, hook_self)
982 print("_var_to_pointer:" + str(var.data.id))
983 if var.grad is not None:
--> 984 self._var_to_pointer(var.grad, hook_self)
985
986 var.data.old_set_(var.data.__class__(0))
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in _var_to_pointer(self, var, hook_self)
989 id=var.data.id,
990 owners=var.owners,
--> 991 is_pointer=True)
992 return var
993
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/workers.py in register_object(self, worker, obj, force_attach_to_worker, temporary, **kwargs)
508 raise RuntimeError(
509 'Invalid registry: is_pointer is {} but owners is {} on tensor {}'.format(
--> 510 obj.is_pointer, obj.owners, obj.id))
511 # print("setting object:" + str(obj.id))
512 self.set_obj(obj.id, obj, force=force_attach_to_worker, tmp=temporary)
RuntimeError: Invalid registry: is_pointer is True but owners is [0] on tensor 2841986768
|
RuntimeError
|
def deser(self, obj_msg):
"""Deserializes a JSON object into a variable"""
if "data" in obj_msg.keys():
data_msg = json.loads(obj_msg["data"])
tensor_type = hook_self.types_guard(data_msg["torch_type"])
data_obj = tensor_type.deser(tensor_type, data_msg)
# data_obj = hook_self.build_tensor(data_msg, tensor_type)
data = hook_self.local_worker.handle_register(data_obj, data_msg)
if "grad" in obj_msg.keys():
if obj_msg["grad"] is not None:
grad_msg = json.loads(obj_msg["grad"])
var_type = hook_self.types_guard(grad_msg["torch_type"])
grad_obj = hook_self._build_var(grad_msg, var_type)
grad = hook_self.local_worker.handle_register(
grad_obj, grad_msg, force_attach_to_worker=False, temporary=True
)
else:
grad = None
# nn.parameter.Parameter does not accept "volatile" as an input param.
# https://pytorch.org/docs/0.3.1/autograd.html#variable
if self == torch.nn.parameter.Parameter:
var = self(data, requires_grad=obj_msg["requires_grad"])
else:
var = self(
data, volatile=obj_msg["volatile"], requires_grad=obj_msg["requires_grad"]
)
# var.grad = grad
if grad is not None:
setattr(var, "grad", grad)
else:
var.grad = None
# this returns grad because garbage collection seems to do something really strange
# if grad isn't returned here. It re-initializes the gradient somehow but in a way
# where it's not registered (which is bad)
return var
|
def deser(self, obj_msg):
if "data" in obj_msg.keys():
data_msg = json.loads(obj_msg["data"])
tensor_type = hook_self.types_guard(data_msg["torch_type"])
data_obj = tensor_type.deser(tensor_type, data_msg)
# data_obj = hook_self.build_tensor(data_msg, tensor_type)
data = hook_self.local_worker.handle_register(data_obj, data_msg)
if "grad" in obj_msg.keys():
if obj_msg["grad"] is not None:
grad_msg = json.loads(obj_msg["grad"])
var_type = hook_self.types_guard(grad_msg["torch_type"])
grad_obj = hook_self._build_var(grad_msg, var_type)
grad = hook_self.local_worker.handle_register(
grad_obj, grad_msg, force_attach_to_worker=False, temporary=True
)
else:
grad = None
var = self(
data, volatile=obj_msg["volatile"], requires_grad=obj_msg["requires_grad"]
)
# var.grad = grad
if grad is not None:
setattr(var, "grad", grad)
else:
var.grad = None
# this returns grad because garbage collection seems to do something really strange
# if grad isn't returned here. It re-initializes the gradient somehow but in a way
# where it's not registered (which is bad)
return var
|
https://github.com/OpenMined/PySyft/issues/1350
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-14-4207de4e45fd> in <module>()
10 loss.backward()
11
---> 12 model.send_(remote)
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in send_(self, workers)
907 owners=self.owners, is_pointer=True)
908
--> 909 return hook_self._var_to_pointer(self, hook_self)
910
911 setattr(torch.autograd.variable.Variable, 'send_', send_)
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in _var_to_pointer(self, var, hook_self)
982 print("_var_to_pointer:" + str(var.data.id))
983 if var.grad is not None:
--> 984 self._var_to_pointer(var.grad, hook_self)
985
986 var.data.old_set_(var.data.__class__(0))
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in _var_to_pointer(self, var, hook_self)
989 id=var.data.id,
990 owners=var.owners,
--> 991 is_pointer=True)
992 return var
993
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/workers.py in register_object(self, worker, obj, force_attach_to_worker, temporary, **kwargs)
508 raise RuntimeError(
509 'Invalid registry: is_pointer is {} but owners is {} on tensor {}'.format(
--> 510 obj.is_pointer, obj.owners, obj.id))
511 # print("setting object:" + str(obj.id))
512 self.set_obj(obj.id, obj, force=force_attach_to_worker, tmp=temporary)
RuntimeError: Invalid registry: is_pointer is True but owners is [0] on tensor 2841986768
|
RuntimeError
|
def _var_to_pointer(self, var, hook_self):
# recursively calls var_to_pointer in a depth first fashion
# only recursive through variables (ignores .data)
if var.grad is not None:
self._var_to_pointer(var.grad, hook_self)
# deletes local data (because now it's a pointer to remote data)
var.data.old_set_(var.data.__class__(0))
return var
|
def _var_to_pointer(self, var, hook_self):
if var.grad is not None:
self._var_to_pointer(var.grad, hook_self)
var.data.old_set_(var.data.__class__(0))
self.local_worker.register_object(
hook_self.local_worker,
obj=var.data,
id=var.data.id,
owners=var.owners,
is_pointer=True,
)
return var
|
https://github.com/OpenMined/PySyft/issues/1350
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-14-4207de4e45fd> in <module>()
10 loss.backward()
11
---> 12 model.send_(remote)
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in send_(self, workers)
907 owners=self.owners, is_pointer=True)
908
--> 909 return hook_self._var_to_pointer(self, hook_self)
910
911 setattr(torch.autograd.variable.Variable, 'send_', send_)
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in _var_to_pointer(self, var, hook_self)
982 print("_var_to_pointer:" + str(var.data.id))
983 if var.grad is not None:
--> 984 self._var_to_pointer(var.grad, hook_self)
985
986 var.data.old_set_(var.data.__class__(0))
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in _var_to_pointer(self, var, hook_self)
989 id=var.data.id,
990 owners=var.owners,
--> 991 is_pointer=True)
992 return var
993
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/workers.py in register_object(self, worker, obj, force_attach_to_worker, temporary, **kwargs)
508 raise RuntimeError(
509 'Invalid registry: is_pointer is {} but owners is {} on tensor {}'.format(
--> 510 obj.is_pointer, obj.owners, obj.id))
511 # print("setting object:" + str(obj.id))
512 self.set_obj(obj.id, obj, force=force_attach_to_worker, tmp=temporary)
RuntimeError: Invalid registry: is_pointer is True but owners is [0] on tensor 2841986768
|
RuntimeError
|
def register_object(
self, worker, obj, force_attach_to_worker=False, temporary=False, **kwargs
):
"""
Registers an object with the current worker node. Selects an
id for the object, assigns a list of owners,
and establishes whether it's a pointer or not. This method
is generally not used by the client and is
instead used by interal processes (hooks and workers).
:Parameters:
* **obj (a torch.Tensor or torch.autograd.Variable)** a Torch
instance, e.g. Tensor or Variable to be registered
* **force_attach_to_worker (bool)** if set to True, it will
force the object to be stored in the worker's permanent registry
* **temporary (bool)** If set to True, it will store the object
in the worker's temporary registry.
:kwargs:
* **id (int or string)** random integer between 0 and 1e10 or
string uniquely identifying the object.
* **owners (list of ** :class:`BaseWorker` objects ** or ids)**
owner(s) of the object
* **is_pointer (bool, optional)** Whether or not the tensor being
registered contains the data locally or is instead a pointer to
a tensor that lives on a different worker.
"""
# TODO: Assign default id more intelligently (low priority)
# Consider popping id from long list of unique integers
keys = kwargs.keys()
# DO NOT DELETE THIS TRY/CATCH UNLESS YOU KNOW WHAT YOU'RE DOING
# PyTorch tensors wrapped invariables (if my_var.data) are python
# objects that get deleted and re-created randomly according to
# the whims of the PyTorch wizards. Thus, our attributes were getting
# deleted with them (because they are not present in the underlying
# C++ code.) Thus, so that these python objects do NOT get garbage
# collected, we're creating a secondary reference to them from the
# parent Variable object (which we have been told is stable). This
# is experimental functionality but seems to solve the symptoms we
# were previously experiencing.
try:
obj.data_backup = obj.data
except:
""
obj.id = (
kwargs["id"]
if ("id" in keys and kwargs["id"] is not None)
else random.randint(0, 1e10)
)
obj.owners = kwargs["owners"] if "owners" in keys else [worker.id]
# check to see if we can resolve owner id to pointer
owner_pointers = list()
for owner in obj.owners:
if owner in self._known_workers.keys():
owner_pointers.append(self._known_workers[owner])
else:
owner_pointers.append(owner)
obj.owners = owner_pointers
obj.is_pointer = kwargs["is_pointer"] if "is_pointer" in keys else False
mal_points_away = obj.is_pointer and worker.id in obj.owners
# print("Mal Points Away:" + str(mal_points_away))
# print("self.local_worker.id in obj.owners == " + str(self.local_worker.id in obj.owners))
# The following was meant to assure that we didn't try to
# register objects we didn't have. We end up needing to register
# objects with non-local owners on the worker side before sending
# things off, so it's been relaxed. Consider using a 'strict'
# kwarg for strict checking of this stuff
mal_points_here = False
# mal_points_here = not obj.is_pointer and self.local_worker.id not in obj.owners
if mal_points_away or mal_points_here:
raise RuntimeError(
"Invalid registry: is_pointer is {} but owners is {} on tensor {}".format(
obj.is_pointer, obj.owners, obj.id
)
)
# print("setting object:" + str(obj.id))
self.set_obj(obj.id, obj, force=force_attach_to_worker, tmp=temporary)
# Perform recursive operations.
# If there is a child tensor (self.data)
if hasattr(obj, "grad"):
if obj.grad is not None:
self.register_object(
worker=worker,
obj=obj.grad,
force_attach_to_worker=force_attach_to_worker,
temporary=temporary,
id=obj.grad.id,
owners=obj.owners,
is_pointer=obj.is_pointer,
)
try:
_ = obj.data
_ = str(_) # just a style issue
if obj.data is not None:
self.register_object(
worker=worker,
obj=obj.data,
force_attach_to_worker=force_attach_to_worker,
temporary=temporary,
id=obj.data.id,
owners=obj.owners,
is_pointer=obj.is_pointer,
)
except RuntimeError:
""
return obj
|
def register_object(
self, worker, obj, force_attach_to_worker=False, temporary=False, **kwargs
):
"""
Registers an object with the current worker node. Selects an
id for the object, assigns a list of owners,
and establishes whether it's a pointer or not. This method
is generally not used by the client and is
instead used by interal processes (hooks and workers).
:Parameters:
* **obj (a torch.Tensor or torch.autograd.Variable)** a Torch
instance, e.g. Tensor or Variable to be registered
* **force_attach_to_worker (bool)** if set to True, it will
force the object to be stored in the worker's permanent registry
* **temporary (bool)** If set to True, it will store the object
in the worker's temporary registry.
:kwargs:
* **id (int or string)** random integer between 0 and 1e10 or
string uniquely identifying the object.
* **owners (list of ** :class:`BaseWorker` objects ** or ids)**
owner(s) of the object
* **is_pointer (bool, optional)** Whether or not the tensor being
registered contains the data locally or is instead a pointer to
a tensor that lives on a different worker.
"""
# TODO: Assign default id more intelligently (low priority)
# Consider popping id from long list of unique integers
keys = kwargs.keys()
# DO NOT DELETE THIS TRY/CATCH UNLESS YOU KNOW WHAT YOU'RE DOING
# PyTorch tensors wrapped invariables (if my_var.data) are python
# objects that get deleted and re-created randomly according to
# the whims of the PyTorch wizards. Thus, our attributes were getting
# deleted with them (because they are not present in the underlying
# C++ code.) Thus, so that these python objects do NOT get garbage
# collected, we're creating a secondary reference to them from the
# parent Variable object (which we have been told is stable). This
# is experimental functionality but seems to solve the symptoms we
# were previously experiencing.
try:
obj.data_backup = obj.data
except:
""
obj.id = (
kwargs["id"]
if ("id" in keys and kwargs["id"] is not None)
else random.randint(0, 1e10)
)
obj.owners = kwargs["owners"] if "owners" in keys else [worker.id]
# check to see if we can resolve owner id to pointer
owner_pointers = list()
for owner in obj.owners:
if owner in self._known_workers.keys():
owner_pointers.append(self._known_workers[owner])
else:
owner_pointers.append(owner)
obj.owners = owner_pointers
obj.is_pointer = kwargs["is_pointer"] if "is_pointer" in keys else False
mal_points_away = obj.is_pointer and worker.id in obj.owners
# print("Mal Points Away:" + str(mal_points_away))
# print("self.local_worker.id in obj.owners == " + str(self.local_worker.id in obj.owners))
# The following was meant to assure that we didn't try to
# register objects we didn't have. We end up needing to register
# objects with non-local owners on the worker side before sending
# things off, so it's been relaxed. Consider using a 'strict'
# kwarg for strict checking of this stuff
mal_points_here = False
# mal_points_here = not obj.is_pointer and self.local_worker.id not in obj.owners
if mal_points_away or mal_points_here:
raise RuntimeError(
"Invalid registry: is_pointer is {} but owners is {}".format(
obj.is_pointer, obj.owners
)
)
# print("setting object:" + str(obj.id))
self.set_obj(obj.id, obj, force=force_attach_to_worker, tmp=temporary)
return obj
|
https://github.com/OpenMined/PySyft/issues/1350
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-14-4207de4e45fd> in <module>()
10 loss.backward()
11
---> 12 model.send_(remote)
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in send_(self, workers)
907 owners=self.owners, is_pointer=True)
908
--> 909 return hook_self._var_to_pointer(self, hook_self)
910
911 setattr(torch.autograd.variable.Variable, 'send_', send_)
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in _var_to_pointer(self, var, hook_self)
982 print("_var_to_pointer:" + str(var.data.id))
983 if var.grad is not None:
--> 984 self._var_to_pointer(var.grad, hook_self)
985
986 var.data.old_set_(var.data.__class__(0))
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/hooks.py in _var_to_pointer(self, var, hook_self)
989 id=var.data.id,
990 owners=var.owners,
--> 991 is_pointer=True)
992 return var
993
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.0-py3.6.egg/syft/core/workers.py in register_object(self, worker, obj, force_attach_to_worker, temporary, **kwargs)
508 raise RuntimeError(
509 'Invalid registry: is_pointer is {} but owners is {} on tensor {}'.format(
--> 510 obj.is_pointer, obj.owners, obj.id))
511 # print("setting object:" + str(obj.id))
512 self.set_obj(obj.id, obj, force=force_attach_to_worker, tmp=temporary)
RuntimeError: Invalid registry: is_pointer is True but owners is [0] on tensor 2841986768
|
RuntimeError
|
def gather_elements(self, client, node, style):
# Take the style from the parent "table" node
# because sometimes it's not passed down.
if node.parent["classes"]:
style = client.styles.combinedStyle(["table"] + node.parent["classes"])
else:
style = client.styles["table"]
rows = []
colWidths = []
hasHead = False
headRows = 0
for n in node.children:
if isinstance(n, docutils.nodes.thead):
hasHead = True
for row in n.children:
r = []
for cell in row.children:
r.append(cell)
rows.append(r)
headRows = len(rows)
elif isinstance(n, docutils.nodes.tbody):
for row in n.children:
r = []
for cell in row.children:
r.append(cell)
rows.append(r)
elif isinstance(n, docutils.nodes.colspec):
colWidths.append(int(n["colwidth"]))
# colWidths are in no specific unit, really. Maybe ems.
tot = sum(colWidths)
adjustedWidths = []
for x in colWidths:
# Convert them to %
w = 100 * x / tot
# Limit minimum width of a column as narrow columns cause strange "huge height" from reportlab if the cell
# padding is larger than the calculated width. Hack for #967
if w < 4:
w = 4
adjustedWidths.append("%s%%" % w)
colWidths = adjustedWidths
if "colWidths" in style.__dict__:
colWidths[: len(style.colWidths)] = style.colWidths
spans = client.filltable(rows)
data = []
rowids = range(0, len(rows))
for row, i in zip(rows, rowids):
r = []
j = 0
for cell in row:
if isinstance(cell, str):
r.append("")
else:
if i < headRows:
st = client.styles["table-heading"]
else:
st = client.styles["table-body"]
ell = client.gather_elements(cell, style=st)
r.append(ell)
j += 1
data.append(r)
st = TableStyle(spans)
if "commands" in style.__dict__:
for cmd in style.commands:
st.add(*cmd)
else:
# Only use the commands from "table" if the
# specified class has no commands.
for cmd in client.styles["table"].commands:
st.add(*cmd)
if hasHead:
for cmd in client.styles.tstyleHead(headRows):
st.add(*cmd)
rtr = client.repeat_table_rows
t = DelayedTable(data, colWidths, st, rtr)
if style.alignment == TA_LEFT:
t.hAlign = "LEFT"
elif style.alignment == TA_CENTER:
t.hAlign = "CENTER"
elif style.alignment == TA_RIGHT:
t.hAlign = "RIGHT"
return [t]
|
def gather_elements(self, client, node, style):
# Take the style from the parent "table" node
# because sometimes it's not passed down.
if node.parent["classes"]:
style = client.styles.combinedStyle(["table"] + node.parent["classes"])
else:
style = client.styles["table"]
rows = []
colWidths = []
hasHead = False
headRows = 0
for n in node.children:
if isinstance(n, docutils.nodes.thead):
hasHead = True
for row in n.children:
r = []
for cell in row.children:
r.append(cell)
rows.append(r)
headRows = len(rows)
elif isinstance(n, docutils.nodes.tbody):
for row in n.children:
r = []
for cell in row.children:
r.append(cell)
rows.append(r)
elif isinstance(n, docutils.nodes.colspec):
colWidths.append(int(n["colwidth"]))
# colWidths are in no specific unit, really. Maybe ems.
# Convert them to %
colWidths = [int(x) for x in colWidths]
tot = sum(colWidths)
colWidths = ["%s%%" % ((100.0 * w) / tot) for w in colWidths]
if "colWidths" in style.__dict__:
colWidths[: len(style.colWidths)] = style.colWidths
spans = client.filltable(rows)
data = []
rowids = range(0, len(rows))
for row, i in zip(rows, rowids):
r = []
j = 0
for cell in row:
if isinstance(cell, str):
r.append("")
else:
if i < headRows:
st = client.styles["table-heading"]
else:
st = client.styles["table-body"]
ell = client.gather_elements(cell, style=st)
r.append(ell)
j += 1
data.append(r)
st = TableStyle(spans)
if "commands" in style.__dict__:
for cmd in style.commands:
st.add(*cmd)
else:
# Only use the commands from "table" if the
# specified class has no commands.
for cmd in client.styles["table"].commands:
st.add(*cmd)
if hasHead:
for cmd in client.styles.tstyleHead(headRows):
st.add(*cmd)
rtr = client.repeat_table_rows
t = DelayedTable(data, colWidths, st, rtr)
if style.alignment == TA_LEFT:
t.hAlign = "LEFT"
elif style.alignment == TA_CENTER:
t.hAlign = "CENTER"
elif style.alignment == TA_RIGHT:
t.hAlign = "RIGHT"
return [t]
|
https://github.com/rst2pdf/rst2pdf/issues/967
|
Traceback (most recent call last):
File "/usr/bin/rst2pdf", line 8, in <module>
sys.exit(main())
File "/usr/lib/python3.9/site-packages/rst2pdf/createpdf.py", line 1683, in main
RstToPdf(
File "/usr/lib/python3.9/site-packages/rst2pdf/createpdf.py", line 659, in createPdf
pdfdoc.multiBuild(elements)
File "/usr/lib/python3.9/site-packages/reportlab/platypus/doctemplate.py", line 1167, in multiBuild
self.build(tempStory, **buildKwds)
File "/usr/lib/python3.9/site-packages/reportlab/platypus/doctemplate.py", line 1080, in build
self.handle_flowable(flowables)
File "/usr/lib/python3.9/site-packages/rst2pdf/createpdf.py", line 813, in handle_flowable
self.handle_frameEnd()
File "/usr/lib/python3.9/site-packages/reportlab/platypus/doctemplate.py", line 726, in handle_frameEnd
self.handle_pageEnd()
File "/usr/lib/python3.9/site-packages/reportlab/platypus/doctemplate.py", line 668, in handle_pageEnd
raise LayoutError(ident)
reportlab.platypus.doctemplate.LayoutError: More than 10 pages generated without content - halting layout. Likely that a flowable is too large for any frame.
|
reportlab.platypus.doctemplate.LayoutError
|
def add_extensions(options):
extensions = []
for ext in options.extensions:
if not ext.startswith("!"):
extensions.append(ext)
continue
ext = ext[1:]
try:
extensions.remove(ext)
except ValueError:
log.warning(
"Could not remove extension %s -- no such extension installed" % ext
)
else:
log.info("Removed extension %s" % ext)
options.extensions[:] = extensions
if not extensions:
return
class ModuleProxy(object):
def __init__(self):
self.__dict__ = globals()
createpdf = ModuleProxy()
for modname in options.extensions:
prefix, modname = os.path.split(modname)
path_given = prefix
if modname.endswith(".py"):
modname = modname[:-3]
path_given = True
if not prefix:
prefix = os.path.join(os.path.dirname(__file__), "extensions")
if prefix not in sys.path:
sys.path.append(prefix)
prefix = os.getcwd()
if prefix not in sys.path:
sys.path.insert(0, prefix)
log.info("Importing extension module %s", repr(modname))
firstname = path_given and modname or (modname + "_r2p")
_names = [firstname, modname]
import_exc = None
for _name in _names:
try:
module = import_module(_name, "rst2pdf")
break
except ImportError as e:
import_exc = e
else:
if not import_exc:
continue
if str(import_exc).split()[-1].replace("'", "") not in [firstname, modname]:
raise import_exc
raise SystemExit(
"\nError: Could not find module %s in sys.path [\n %s\n]\n"
"Exiting...\n" % (modname, ",\n ".join(sys.path))
)
if hasattr(module, "install"):
module.install(createpdf, options)
|
def add_extensions(options):
extensions = []
for ext in options.extensions:
if not ext.startswith("!"):
extensions.append(ext)
continue
ext = ext[1:]
try:
extensions.remove(ext)
except ValueError:
log.warning(
"Could not remove extension %s -- no such extension installed" % ext
)
else:
log.info("Removed extension %s" % ext)
options.extensions[:] = extensions
if not extensions:
return
class ModuleProxy(object):
def __init__(self):
self.__dict__ = globals()
createpdf = ModuleProxy()
for modname in options.extensions:
prefix, modname = os.path.split(modname)
path_given = prefix
if modname.endswith(".py"):
modname = modname[:-3]
path_given = True
if not prefix:
prefix = os.path.join(os.path.dirname(__file__), "extensions")
if prefix not in sys.path:
sys.path.append(prefix)
prefix = os.getcwd()
if prefix not in sys.path:
sys.path.insert(0, prefix)
log.info("Importing extension module %s", repr(modname))
firstname = path_given and modname or (modname + "_r2p")
_names = [firstname, modname]
try:
for _name in _names:
try:
module = import_module(_name, "rst2pdf")
except ImportError:
continue
except ImportError as e:
if str(e).split()[-1].replace("'", "") not in [firstname, modname]:
raise
raise SystemExit(
"\nError: Could not find module %s "
"in sys.path [\n %s\n]\nExiting...\n"
% (modname, ",\n ".join(sys.path))
)
if hasattr(module, "install"):
module.install(createpdf, options)
|
https://github.com/rst2pdf/rst2pdf/issues/920
|
[ERROR] pdfbuilder.py:149 local variable 'module' referenced before assignment
Traceback (most recent call last):
File "/home/dk/.local/lib/python3.7/site-packages/rst2pdf/pdfbuilder.py", line 107, in write
createpdf.add_extensions(dummy())
File "/home/dk/.local/lib/python3.7/site-packages/rst2pdf/createpdf.py", line 1595, in add_extensions
if hasattr(module, 'install'):
UnboundLocalError: local variable 'module' referenced before assignment
|
UnboundLocalError
|
def get_pre_post(self, client, node, replaceEnt):
pre = ""
if node["ids"]:
if node["ids"][0] not in client.targets:
pre = '<a name="%s"/>' % node["ids"][0]
client.targets.append(node["ids"][0])
else:
name = node["names"][0]
if name:
pre = '<a name="%s"/>' % name
client.targets.append(name)
return pre, ""
|
def get_pre_post(self, client, node, replaceEnt):
pre = ""
if node["ids"]:
if node["ids"][0] not in client.targets:
pre = '<a name="%s"/>' % node["ids"][0]
client.targets.append(node["ids"][0])
else:
for attr in ["refid", "refuri"]:
value = node.get(attr)
if value:
pre = '<a name="%s"/>' % value
client.targets.append(value)
break
else:
raise ValueError(
"Target node '%s' doesn't have neither 'refid' or 'refuri'" % node
)
return pre, ""
|
https://github.com/rst2pdf/rst2pdf/issues/820
|
Traceback (most recent call last):
File "/usr/local/bin/rst2pdf", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python2.7/dist-packages/rst2pdf/createpdf.py", line 1511, in main
compressed=options.compressed)
File "/usr/local/lib/python2.7/dist-packages/rst2pdf/createpdf.py", line 570, in createPdf
elements = self.gen_elements(self.doctree)
File "/usr/local/lib/python2.7/dist-packages/rst2pdf/basenodehandler.py", line 251, in elemdispatch
elements = self.getelements(client, node, style)
File "/usr/local/lib/python2.7/dist-packages/rst2pdf/basenodehandler.py", line 226, in getelements
elements = self.gather_elements(client, node, style)
File "/usr/local/lib/python2.7/dist-packages/rst2pdf/basenodehandler.py", line 206, in gather_elements
return client.gather_elements(node, style=style)
File "/usr/local/lib/python2.7/dist-packages/rst2pdf/createpdf.py", line 400, in gather_elements
r.extend(self.gen_elements(n, style=style))
File "/usr/local/lib/python2.7/dist-packages/rst2pdf/basenodehandler.py", line 251, in elemdispatch
elements = self.getelements(client, node, style)
File "/usr/local/lib/python2.7/dist-packages/rst2pdf/basenodehandler.py", line 226, in getelements
elements = self.gather_elements(client, node, style)
File "/usr/local/lib/python2.7/dist-packages/rst2pdf/genelements.py", line 188, in gather_elements
return [Paragraph(client.gen_pdftext(node), style)]
File "/usr/local/lib/python2.7/dist-packages/rst2pdf/basenodehandler.py", line 281, in textdispatch
text = self.get_text(client, node, replaceEnt)
File "/usr/local/lib/python2.7/dist-packages/rst2pdf/basenodehandler.py", line 267, in get_text
return client.gather_pdftext(node)
File "/usr/local/lib/python2.7/dist-packages/rst2pdf/createpdf.py", line 390, in gather_pdftext
for n in node.children])
File "/usr/local/lib/python2.7/dist-packages/rst2pdf/basenodehandler.py", line 280, in textdispatch
pre, post = self.get_pre_post(client, node, replaceEnt)
File "/usr/local/lib/python2.7/dist-packages/rst2pdf/genpdftext.py", line 237, in get_pre_post
pre = u'<a name="%s"/>' % node['refuri']
File "/usr/local/lib/python2.7/dist-packages/docutils/nodes.py", line 585, in __getitem__
return self.attributes[key]
KeyError: 'refuri'
|
KeyError
|
def get_pre_post(self, client, node, replaceEnt):
pre = ""
if node["ids"]:
if node["ids"][0] not in client.targets:
pre = '<a name="%s"/>' % node["ids"][0]
client.targets.append(node["ids"][0])
else:
for attr in ["refid", "refuri"]:
value = node.get(attr)
if value:
pre = '<a name="%s"/>' % value
client.targets.append(value)
break
else:
raise ValueError(
"Target node '%s' doesn't have neither 'refid' or 'refuri'" % node
)
return pre, ""
|
def get_pre_post(self, client, node, replaceEnt):
pre = ""
if node["ids"]:
if node["ids"][0] not in client.targets:
pre = '<a name="%s"/>' % node["ids"][0]
client.targets.append(node["ids"][0])
else:
pre = '<a name="%s"/>' % node["refuri"]
client.targets.append(node["refuri"])
return pre, ""
|
https://github.com/rst2pdf/rst2pdf/issues/820
|
Traceback (most recent call last):
File "/usr/local/bin/rst2pdf", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python2.7/dist-packages/rst2pdf/createpdf.py", line 1511, in main
compressed=options.compressed)
File "/usr/local/lib/python2.7/dist-packages/rst2pdf/createpdf.py", line 570, in createPdf
elements = self.gen_elements(self.doctree)
File "/usr/local/lib/python2.7/dist-packages/rst2pdf/basenodehandler.py", line 251, in elemdispatch
elements = self.getelements(client, node, style)
File "/usr/local/lib/python2.7/dist-packages/rst2pdf/basenodehandler.py", line 226, in getelements
elements = self.gather_elements(client, node, style)
File "/usr/local/lib/python2.7/dist-packages/rst2pdf/basenodehandler.py", line 206, in gather_elements
return client.gather_elements(node, style=style)
File "/usr/local/lib/python2.7/dist-packages/rst2pdf/createpdf.py", line 400, in gather_elements
r.extend(self.gen_elements(n, style=style))
File "/usr/local/lib/python2.7/dist-packages/rst2pdf/basenodehandler.py", line 251, in elemdispatch
elements = self.getelements(client, node, style)
File "/usr/local/lib/python2.7/dist-packages/rst2pdf/basenodehandler.py", line 226, in getelements
elements = self.gather_elements(client, node, style)
File "/usr/local/lib/python2.7/dist-packages/rst2pdf/genelements.py", line 188, in gather_elements
return [Paragraph(client.gen_pdftext(node), style)]
File "/usr/local/lib/python2.7/dist-packages/rst2pdf/basenodehandler.py", line 281, in textdispatch
text = self.get_text(client, node, replaceEnt)
File "/usr/local/lib/python2.7/dist-packages/rst2pdf/basenodehandler.py", line 267, in get_text
return client.gather_pdftext(node)
File "/usr/local/lib/python2.7/dist-packages/rst2pdf/createpdf.py", line 390, in gather_pdftext
for n in node.children])
File "/usr/local/lib/python2.7/dist-packages/rst2pdf/basenodehandler.py", line 280, in textdispatch
pre, post = self.get_pre_post(client, node, replaceEnt)
File "/usr/local/lib/python2.7/dist-packages/rst2pdf/genpdftext.py", line 237, in get_pre_post
pre = u'<a name="%s"/>' % node['refuri']
File "/usr/local/lib/python2.7/dist-packages/docutils/nodes.py", line 585, in __getitem__
return self.attributes[key]
KeyError: 'refuri'
|
KeyError
|
def read(self):
    """Return the buffered string contents, file-object style."""
    return self._str
|
def read(self):
    """Act file-like by handing back the object itself (a str subclass)."""
    return self
|
https://github.com/rst2pdf/rst2pdf/issues/785
|
$ rst2pdf test.rst -e preprocess -v
[INFO] createpdf.py:1578 Importing extension module 'vectorpdf'
[INFO] createpdf.py:1578 Importing extension module 'preprocess'
Traceback (most recent call last):
File "/Users/rob/.pyenv/versions/rst2pdf-2/bin/rst2pdf", line 11, in <module>
load_entry_point('rst2pdf', 'console_scripts', 'rst2pdf')()
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/createpdf.py", line 1461, in main
add_extensions(options)
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/createpdf.py", line 1592, in add_extensions
module.install(createpdf, options)
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/extensions/preprocess_r2p.py", line 367, in install
data = Preprocess(options.infile)
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/extensions/preprocess_r2p.py", line 153, in __init__
self.sourcef = DummyFile(source)
UnicodeEncodeError: 'ascii' codec can't encode characters in position 44-46: ordinal not in range(128)
|
UnicodeEncodeError
|
def __init__(self, sourcef, incfile=False, widthcount=0):
    """Process a file and decorate the resultant Preprocess instance with
    self.result (the preprocessed file) and self.styles (extracted stylesheet
    information) for the caller.

    :param sourcef: file-like object exposing ``.name`` and ``.read()``
    :param incfile: True when the file came from an include directive and
        may actually be a stylesheet rather than restructured text
    :param widthcount: running counter carried over from the parent file
    """
    # fix keywords dict for use by the parser: "<kw>::" -> bound handler.
    self.keywords = dict(
        [(x + "::", getattr(self, "handle_" + x)) for x in self.keywords]
    )
    self.widthcount = widthcount
    name = sourcef.name
    source = sourcef.read()
    if isinstance(source, bytes):
        source = source.decode("utf8")
    # Normalize all line endings to "\n".
    source = source.replace("\r\n", "\n").replace("\r", "\n")
    # Make the determination if an include file is a stylesheet or
    # another restructured text file, and handle stylesheets appropriately.
    if incfile:
        try:
            self.styles = styles = rson_loads(source)
            substyles = styles.get("styles")
            if substyles is not None:
                styles["styles"] = dict(substyles)
        except Exception:
            # Not parseable as a stylesheet; fall through and treat the
            # include as restructured text.  (Narrowed from a bare
            # ``except:`` so SystemExit/KeyboardInterrupt propagate.)
            pass
        else:
            self.changed = True
            self.keep = False
            return
    # Read the whole file and wrap it in a DummyFile
    self.sourcef = DummyFile(name, source)
    # Use a regular expression on the source, to take it apart
    # and put it back together again.
    self.source = source = [x for x in self.splitter(source) if x]
    self.result = result = []
    self.styles = {}
    self.changed = False
    # More efficient to pop() a list than to keep taking tokens from [0]
    source.reverse()
    isblank = False
    keywords = self.keywords
    handle_single = keywords["single::"]
    while source:
        wasblank = isblank
        isblank = False
        chunk = source.pop()
        result.append(chunk)
        # Only process single lines
        if not chunk.endswith("\n"):
            continue
        result[-1] = chunk[:-1]
        if chunk.index("\n") != len(chunk) - 1:
            continue
        # Parse the line to look for one of our keywords.
        tokens = chunk.split()
        isblank = not tokens
        if len(tokens) >= 2 and tokens[0] == ".." and tokens[1].endswith("::"):
            func = keywords.get(tokens[1])
            if func is None:
                continue
            chunk = chunk.split("::", 1)[1]
        elif (
            wasblank and len(tokens) == 1 and chunk[0].isalpha() and tokens[0].isalpha()
        ):
            func = handle_single
            chunk = tokens[0]
        else:
            continue
        result.pop()
        func(chunk.strip())
    # Determine if we actually did anything or not. Just use our source file
    # if not. Otherwise, write the results to disk (so the user can use them
    # for debugging) and return them.
    if self.changed:
        result.append("")
        result = DummyFile(name + ".build_temp", "\n".join(result))
        self.keep = keep = len(result.read().strip())
        if keep:
            # Can call read a second time here because it's a DummyFile.
            # ``with`` guarantees the debug copy is closed even on error.
            with open(result.name, "w") as f:
                f.write(result.read())
        self.result = result
    else:
        self.result = self.sourcef
|
def __init__(self, sourcef, incfile=False, widthcount=0):
    """Process a file and decorate the resultant Preprocess instance with
    self.result (the preprocessed file) and self.styles (extracted stylesheet
    information) for the caller.

    :param sourcef: file-like object exposing ``.name`` and ``.read()``
    :param incfile: True when the file came from an include directive and
        may actually be a stylesheet rather than restructured text
    :param widthcount: running counter carried over from the parent file
    """
    # fix keywords dict for use by the parser: "<kw>::" -> bound handler.
    self.keywords = dict(
        [(x + "::", getattr(self, "handle_" + x)) for x in self.keywords]
    )
    self.widthcount = widthcount
    name = sourcef.name
    source = sourcef.read()
    if isinstance(source, bytes):
        source = source.decode("utf8")
    # Normalize all line endings to "\n".
    source = source.replace("\r\n", "\n").replace("\r", "\n")
    # Make the determination if an include file is a stylesheet or
    # another restructured text file, and handle stylesheets appropriately.
    if incfile:
        try:
            self.styles = styles = rson_loads(source)
            substyles = styles.get("styles")
            if substyles is not None:
                styles["styles"] = dict(substyles)
        except Exception:
            # Not parseable as a stylesheet; fall through and treat the
            # include as restructured text.  (Narrowed from a bare
            # ``except:`` so SystemExit/KeyboardInterrupt propagate.)
            pass
        else:
            self.changed = True
            self.keep = False
            return
    # Read the whole file and wrap it in a DummyFile
    self.sourcef = DummyFile(source)
    self.sourcef.name = name
    # Use a regular expression on the source, to take it apart
    # and put it back together again.
    self.source = source = [x for x in self.splitter(source) if x]
    self.result = result = []
    self.styles = {}
    self.changed = False
    # More efficient to pop() a list than to keep taking tokens from [0]
    source.reverse()
    isblank = False
    keywords = self.keywords
    handle_single = keywords["single::"]
    while source:
        wasblank = isblank
        isblank = False
        chunk = source.pop()
        result.append(chunk)
        # Only process single lines
        if not chunk.endswith("\n"):
            continue
        result[-1] = chunk[:-1]
        if chunk.index("\n") != len(chunk) - 1:
            continue
        # Parse the line to look for one of our keywords.
        tokens = chunk.split()
        isblank = not tokens
        if len(tokens) >= 2 and tokens[0] == ".." and tokens[1].endswith("::"):
            func = keywords.get(tokens[1])
            if func is None:
                continue
            chunk = chunk.split("::", 1)[1]
        elif (
            wasblank and len(tokens) == 1 and chunk[0].isalpha() and tokens[0].isalpha()
        ):
            func = handle_single
            chunk = tokens[0]
        else:
            continue
        result.pop()
        func(chunk.strip())
    # Determine if we actually did anything or not. Just use our source file
    # if not. Otherwise, write the results to disk (so the user can use them
    # for debugging) and return them.
    if self.changed:
        result.append("")
        result = DummyFile("\n".join(result))
        result.name = name + ".build_temp"
        # NOTE(review): this relies on DummyFile being a str subclass so
        # strip()/write() work on it directly -- confirm against DummyFile.
        self.keep = keep = len(result.strip())
        if keep:
            # ``with`` guarantees the debug copy is closed even on error.
            with open(result.name, "w") as f:
                f.write(result)
        self.result = result
    else:
        self.result = self.sourcef
|
https://github.com/rst2pdf/rst2pdf/issues/785
|
$ rst2pdf test.rst -e preprocess -v
[INFO] createpdf.py:1578 Importing extension module 'vectorpdf'
[INFO] createpdf.py:1578 Importing extension module 'preprocess'
Traceback (most recent call last):
File "/Users/rob/.pyenv/versions/rst2pdf-2/bin/rst2pdf", line 11, in <module>
load_entry_point('rst2pdf', 'console_scripts', 'rst2pdf')()
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/createpdf.py", line 1461, in main
add_extensions(options)
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/createpdf.py", line 1592, in add_extensions
module.install(createpdf, options)
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/extensions/preprocess_r2p.py", line 367, in install
data = Preprocess(options.infile)
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/extensions/preprocess_r2p.py", line 153, in __init__
self.sourcef = DummyFile(source)
UnicodeEncodeError: 'ascii' codec can't encode characters in position 44-46: ordinal not in range(128)
|
UnicodeEncodeError
|
def __init__(
    self,
    filename,
    width=None,
    height=None,
    kind="direct",
    mask="auto",
    lazy=1,
    client=None,
    target=None,
):
    """Build the backend image flowable, fetching remote URIs to a local
    temporary file first."""
    # Client is mandatory. Perhaps move it farther up if we refactor
    assert client is not None
    self.__kind = kind
    scheme = filename.split("://")[0].lower()
    if scheme in ("http", "ftp", "https"):
        try:
            fetched, _ = urlretrieve(filename)
            if filename != fetched:
                # Remember the temp file so it can be cleaned up later.
                client.to_unlink.append(fetched)
            filename = fetched
        except IOError:
            # Download failed: fall back to the "missing image" placeholder.
            filename = missing
    self.filename, self._backend = self.get_backend(filename, client)
    srcinfo = client, self.filename
    if kind == "percentage_of_container":
        # Create the backend as "direct" and stash the raw percentage
        # width/height for later use.
        self.image = self._backend(
            self.filename, width, height, "direct", mask, lazy, srcinfo
        )
        self.image.drawWidth = width
        self.image.drawHeight = height
        self.__width = width
        self.__height = height
    else:
        self.image = self._backend(
            self.filename, width, height, kind, mask, lazy, srcinfo
        )
    self.__ratio = float(self.image.imageWidth) / self.image.imageHeight
    self.__wrappedonce = False
    self.target = target
|
def __init__(
    self,
    filename,
    width=None,
    height=None,
    kind="direct",
    mask="auto",
    lazy=1,
    client=None,
    target=None,
):
    """Build the backend image flowable, fetching remote URIs to a local
    temporary file first.

    BUG FIX: ``urllib.urlretrieve`` does not exist on Python 3 (it moved
    to ``urllib.request``), which raised AttributeError for remote images.
    """
    # Client is mandatory. Perhaps move it farther up if we refactor
    assert client is not None
    self.__kind = kind
    if filename.split("://")[0].lower() in ("http", "ftp", "https"):
        # Local import keeps the Python 3 fix self-contained.
        from urllib.request import urlretrieve

        try:
            filename2, _ = urlretrieve(filename)
            if filename != filename2:
                client.to_unlink.append(filename2)
            filename = filename2
        except IOError:
            # Download failed: fall back to the "missing image" placeholder.
            filename = missing
    self.filename, self._backend = self.get_backend(filename, client)
    srcinfo = client, self.filename
    if kind == "percentage_of_container":
        self.image = self._backend(
            self.filename, width, height, "direct", mask, lazy, srcinfo
        )
        self.image.drawWidth = width
        self.image.drawHeight = height
        self.__width = width
        self.__height = height
    else:
        self.image = self._backend(
            self.filename, width, height, kind, mask, lazy, srcinfo
        )
    self.__ratio = float(self.image.imageWidth) / self.image.imageHeight
    self.__wrappedonce = False
    self.target = target
|
https://github.com/rst2pdf/rst2pdf/issues/780
|
Traceback (most recent call last):
File "/home/sroeca/.pyenv/versions/test-rst-mzrgg/bin/rst2pdf", line 11, in <module>
load_entry_point('rst2pdf', 'console_scripts', 'rst2pdf')()
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/createpdf.py", line 1496, in main
compressed=options.compressed)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/createpdf.py", line 553, in createPdf
elements = self.gen_elements(self.doctree)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/basenodehandler.py", line 253, in elemdispatch
elements = self.getelements(client, node, style)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/basenodehandler.py", line 228, in getelements
elements = self.gather_elements(client, node, style)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/basenodehandler.py", line 208, in gather_elements
return client.gather_elements(node, style=style)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/createpdf.py", line 385, in gather_elements
r.extend(self.gen_elements(n, style=style))
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/basenodehandler.py", line 253, in elemdispatch
elements = self.getelements(client, node, style)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/basenodehandler.py", line 228, in getelements
elements = self.gather_elements(client, node, style)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/genelements.py", line 432, in gather_elements
elements = client.gather_elements(node)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/createpdf.py", line 385, in gather_elements
r.extend(self.gen_elements(n, style=style))
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/basenodehandler.py", line 253, in elemdispatch
elements = self.getelements(client, node, style)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/basenodehandler.py", line 228, in getelements
elements = self.gather_elements(client, node, style)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/genpdftext.py", line 158, in gather_elements
w, h, kind = MyImage.size_for_node(node, client=client)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/image.py", line 223, in size_for_node
uri, _ = urllib.urlretrieve(uri)
AttributeError: module 'urllib' has no attribute 'urlretrieve'
|
AttributeError
|
def size_for_node(self, node, client):
    """Given a docutils image node, returns the size the image should have
    in the PDF document, and what 'kind' of size that is.
    That involves lots of guesswork.

    Returns a (width, height, kind) tuple; width/height are in points and
    kind is either "direct" or "percentage_of_container".
    """
    uri = str(node.get("uri"))
    if uri.split("://")[0].lower() not in ("http", "ftp", "https"):
        uri = os.path.join(client.basedir, uri)
    else:
        # Remote image: download to a temp file and schedule its removal.
        uri, _ = urlretrieve(uri)
        client.to_unlink.append(uri)
    srcinfo = client, uri
    # Extract all the information from the URI
    imgname, extension, options = self.split_uri(uri)
    if not os.path.isfile(imgname):
        imgname = missing
    scale = float(node.get("scale", 100)) / 100
    # Figuring out the size to display of an image is ... annoying.
    # If the user provides a size with a unit, it's simple, adjustUnits
    # will return it in points and we're done.
    # However, often the unit wil be "%" (specially if it's meant for
    # HTML originally. In which case, we will use a percentage of
    # the containing frame.
    # Find the image size in pixels:
    kind = "direct"
    xdpi, ydpi = client.styles.def_dpi, client.styles.def_dpi
    extension = imgname.split(".")[-1].lower()
    if extension in ["svg", "svgz"]:
        iw, ih = SVGImage(imgname, srcinfo=srcinfo).wrap(0, 0)
        # These are in pt, so convert to px
        iw = iw * xdpi / 72
        ih = ih * ydpi / 72
    elif extension == "pdf":
        if VectorPdf is not None:
            xobj = VectorPdf.load_xobj(srcinfo)
            iw, ih = xobj.w, xobj.h
        else:
            # ``with`` closes the PDF file (previously leaked).
            with open(imgname, "rb") as f:
                reader = pdfinfo.PdfFileReader(f)
                box = [float(x) for x in reader.getPage(0)["/MediaBox"]]
            # BUG FIX: the MediaBox coordinates were never unpacked, so
            # x1/y1/x2/y2 raised NameError on this branch.
            x1, y1, x2, y2 = box
            iw, ih = x2 - x1, y2 - y1
        # These are in pt, so convert to px
        iw = iw * xdpi / 72.0
        ih = ih * ydpi / 72.0
    else:
        keeptrying = True
        if PILImage:
            try:
                img = PILImage.open(imgname)
                img.load()
                iw, ih = img.size
                xdpi, ydpi = img.info.get("dpi", (xdpi, ydpi))
                keeptrying = False
            except IOError:  # PIL throws this when it's a broken/unknown image
                pass
        if keeptrying:
            if extension not in ["jpg", "jpeg"]:
                log.error(
                    "The image (%s, %s) is broken or in an unknown format",
                    imgname,
                    nodeid(node),
                )
                raise ValueError
            else:
                # Can be handled by reportlab
                log.warning(
                    "Can't figure out size of the image (%s, %s). Install PIL for better results.",
                    imgname,
                    nodeid(node),
                )
                iw = 1000
                ih = 1000
    # Try to get the print resolution from the image itself via PIL.
    # If it fails, assume a DPI of 300, which is pretty much made up,
    # and then a 100% size would be iw*inch/300, so we pass
    # that as the second parameter to adjustUnits
    #
    # Some say the default DPI should be 72. That would mean
    # the largest printable image in A4 paper would be something
    # like 480x640. That would be awful.
    #
    w = node.get("width")
    h = node.get("height")
    if h is None and w is None:  # Nothing specified
        # Guess from iw, ih
        log.debug(
            "Using image %s without specifying size."
            "Calculating based on image size at %ddpi [%s]",
            imgname,
            xdpi,
            nodeid(node),
        )
        w = iw * inch / xdpi
        h = ih * inch / ydpi
    elif w is not None:
        # Node specifies only w
        # In this particular case, we want the default unit
        # to be pixels so we work like rst2html
        if w[-1] == "%":
            kind = "percentage_of_container"
            w = int(w[:-1])
        else:
            # This uses default DPI setting because we
            # are not using the image's "natural size"
            # this is what LaTeX does, according to the
            # docutils mailing list discussion
            w = client.styles.adjustUnits(w, client.styles.tw, default_unit="px")
        if h is None:
            # h is set from w with right aspect ratio
            h = w * ih / iw
        else:
            h = client.styles.adjustUnits(h, ih * inch / ydpi, default_unit="px")
    elif h is not None and w is None:
        if h[-1] != "%":
            h = client.styles.adjustUnits(h, ih * inch / ydpi, default_unit="px")
            # w is set from h with right aspect ratio
            w = h * iw / ih
        else:
            log.error(
                "Setting height as a percentage does **not** work. "
                "ignoring height parameter [%s]",
                nodeid(node),
            )
            # Set both from image data
            w = iw * inch / xdpi
            h = ih * inch / ydpi
    # Apply scale factor
    w = w * scale
    h = h * scale
    # And now we have this probably completely bogus size!
    log.info(
        "Image %s size calculated: %fcm by %fcm [%s]",
        imgname,
        w / cm,
        h / cm,
        nodeid(node),
    )
    return w, h, kind
|
def size_for_node(self, node, client):
    """Given a docutils image node, returns the size the image should have
    in the PDF document, and what 'kind' of size that is.
    That involves lots of guesswork.

    Returns a (width, height, kind) tuple; width/height are in points and
    kind is either "direct" or "percentage_of_container".
    """
    uri = str(node.get("uri"))
    if uri.split("://")[0].lower() not in ("http", "ftp", "https"):
        uri = os.path.join(client.basedir, uri)
    else:
        # BUG FIX: ``urllib.urlretrieve`` was removed in Python 3; use
        # ``urllib.request.urlretrieve`` (imported locally) instead.
        from urllib.request import urlretrieve

        uri, _ = urlretrieve(uri)
        client.to_unlink.append(uri)
    srcinfo = client, uri
    # Extract all the information from the URI
    imgname, extension, options = self.split_uri(uri)
    if not os.path.isfile(imgname):
        imgname = missing
    scale = float(node.get("scale", 100)) / 100
    # Figuring out the size to display of an image is ... annoying.
    # If the user provides a size with a unit, it's simple, adjustUnits
    # will return it in points and we're done.
    # However, often the unit wil be "%" (specially if it's meant for
    # HTML originally. In which case, we will use a percentage of
    # the containing frame.
    # Find the image size in pixels:
    kind = "direct"
    xdpi, ydpi = client.styles.def_dpi, client.styles.def_dpi
    extension = imgname.split(".")[-1].lower()
    if extension in ["svg", "svgz"]:
        iw, ih = SVGImage(imgname, srcinfo=srcinfo).wrap(0, 0)
        # These are in pt, so convert to px
        iw = iw * xdpi / 72
        ih = ih * ydpi / 72
    elif extension == "pdf":
        if VectorPdf is not None:
            xobj = VectorPdf.load_xobj(srcinfo)
            iw, ih = xobj.w, xobj.h
        else:
            # ``with`` closes the PDF file (previously leaked).
            with open(imgname, "rb") as f:
                reader = pdfinfo.PdfFileReader(f)
                box = [float(x) for x in reader.getPage(0)["/MediaBox"]]
            # BUG FIX: the MediaBox coordinates were never unpacked, so
            # x1/y1/x2/y2 raised NameError on this branch.
            x1, y1, x2, y2 = box
            iw, ih = x2 - x1, y2 - y1
        # These are in pt, so convert to px
        iw = iw * xdpi / 72.0
        ih = ih * ydpi / 72.0
    else:
        keeptrying = True
        if PILImage:
            try:
                img = PILImage.open(imgname)
                img.load()
                iw, ih = img.size
                xdpi, ydpi = img.info.get("dpi", (xdpi, ydpi))
                keeptrying = False
            except IOError:  # PIL throws this when it's a broken/unknown image
                pass
        if keeptrying:
            if extension not in ["jpg", "jpeg"]:
                log.error(
                    "The image (%s, %s) is broken or in an unknown format",
                    imgname,
                    nodeid(node),
                )
                raise ValueError
            else:
                # Can be handled by reportlab
                log.warning(
                    "Can't figure out size of the image (%s, %s). Install PIL for better results.",
                    imgname,
                    nodeid(node),
                )
                iw = 1000
                ih = 1000
    # Try to get the print resolution from the image itself via PIL.
    # If it fails, assume a DPI of 300, which is pretty much made up,
    # and then a 100% size would be iw*inch/300, so we pass
    # that as the second parameter to adjustUnits
    #
    # Some say the default DPI should be 72. That would mean
    # the largest printable image in A4 paper would be something
    # like 480x640. That would be awful.
    #
    w = node.get("width")
    h = node.get("height")
    if h is None and w is None:  # Nothing specified
        # Guess from iw, ih
        log.debug(
            "Using image %s without specifying size."
            "Calculating based on image size at %ddpi [%s]",
            imgname,
            xdpi,
            nodeid(node),
        )
        w = iw * inch / xdpi
        h = ih * inch / ydpi
    elif w is not None:
        # Node specifies only w
        # In this particular case, we want the default unit
        # to be pixels so we work like rst2html
        if w[-1] == "%":
            kind = "percentage_of_container"
            w = int(w[:-1])
        else:
            # This uses default DPI setting because we
            # are not using the image's "natural size"
            # this is what LaTeX does, according to the
            # docutils mailing list discussion
            w = client.styles.adjustUnits(w, client.styles.tw, default_unit="px")
        if h is None:
            # h is set from w with right aspect ratio
            h = w * ih / iw
        else:
            h = client.styles.adjustUnits(h, ih * inch / ydpi, default_unit="px")
    elif h is not None and w is None:
        if h[-1] != "%":
            h = client.styles.adjustUnits(h, ih * inch / ydpi, default_unit="px")
            # w is set from h with right aspect ratio
            w = h * iw / ih
        else:
            log.error(
                "Setting height as a percentage does **not** work. "
                "ignoring height parameter [%s]",
                nodeid(node),
            )
            # Set both from image data
            w = iw * inch / xdpi
            h = ih * inch / ydpi
    # Apply scale factor
    w = w * scale
    h = h * scale
    # And now we have this probably completely bogus size!
    log.info(
        "Image %s size calculated: %fcm by %fcm [%s]",
        imgname,
        w / cm,
        h / cm,
        nodeid(node),
    )
    return w, h, kind
|
https://github.com/rst2pdf/rst2pdf/issues/780
|
Traceback (most recent call last):
File "/home/sroeca/.pyenv/versions/test-rst-mzrgg/bin/rst2pdf", line 11, in <module>
load_entry_point('rst2pdf', 'console_scripts', 'rst2pdf')()
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/createpdf.py", line 1496, in main
compressed=options.compressed)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/createpdf.py", line 553, in createPdf
elements = self.gen_elements(self.doctree)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/basenodehandler.py", line 253, in elemdispatch
elements = self.getelements(client, node, style)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/basenodehandler.py", line 228, in getelements
elements = self.gather_elements(client, node, style)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/basenodehandler.py", line 208, in gather_elements
return client.gather_elements(node, style=style)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/createpdf.py", line 385, in gather_elements
r.extend(self.gen_elements(n, style=style))
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/basenodehandler.py", line 253, in elemdispatch
elements = self.getelements(client, node, style)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/basenodehandler.py", line 228, in getelements
elements = self.gather_elements(client, node, style)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/genelements.py", line 432, in gather_elements
elements = client.gather_elements(node)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/createpdf.py", line 385, in gather_elements
r.extend(self.gen_elements(n, style=style))
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/basenodehandler.py", line 253, in elemdispatch
elements = self.getelements(client, node, style)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/basenodehandler.py", line 228, in getelements
elements = self.gather_elements(client, node, style)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/genpdftext.py", line 158, in gather_elements
w, h, kind = MyImage.size_for_node(node, client=client)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/image.py", line 223, in size_for_node
uri, _ = urllib.urlretrieve(uri)
AttributeError: module 'urllib' has no attribute 'urlretrieve'
|
AttributeError
|
def size_for_node(self, node, client):
    """Given a docutils image node, returns the size the image should have
    in the PDF document, and what 'kind' of size that is.
    That involves lots of guesswork.

    Returns a (width, height, kind) tuple; width/height are in points and
    kind is either "direct" or "percentage_of_container".
    """
    uri = str(node.get("uri"))
    if uri.split("://")[0].lower() not in ("http", "ftp", "https"):
        uri = os.path.join(client.basedir, uri)
    else:
        # Remote image: download to a temp file and schedule its removal.
        uri, _ = urlretrieve(uri)
        client.to_unlink.append(uri)
    srcinfo = client, uri
    # Extract all the information from the URI
    imgname, extension, options = self.split_uri(uri)
    if not os.path.isfile(imgname):
        imgname = missing
    scale = float(node.get("scale", 100)) / 100
    # Figuring out the size to display of an image is ... annoying.
    # If the user provides a size with a unit, it's simple, adjustUnits
    # will return it in points and we're done.
    # However, often the unit wil be "%" (specially if it's meant for
    # HTML originally. In which case, we will use a percentage of
    # the containing frame.
    # Find the image size in pixels:
    kind = "direct"
    xdpi, ydpi = client.styles.def_dpi, client.styles.def_dpi
    extension = imgname.split(".")[-1].lower()
    if extension in ["svg", "svgz"]:
        from .svgimage import SVGImage

        iw, ih = SVGImage(imgname, srcinfo=srcinfo).wrap(0, 0)
        # These are in pt, so convert to px
        iw = iw * xdpi / 72
        ih = ih * ydpi / 72
    elif extension == "pdf":
        if VectorPdf is not None:
            xobj = VectorPdf.load_xobj(srcinfo)
            iw, ih = xobj.w, xobj.h
        else:
            # ``with`` closes the PDF file (previously leaked).
            with open(imgname, "rb") as f:
                reader = pdfinfo.PdfFileReader(f)
                box = [float(x) for x in reader.getPage(0)["/MediaBox"]]
            # BUG FIX: the MediaBox coordinates were never unpacked, so
            # x1/y1/x2/y2 raised NameError on this branch.
            x1, y1, x2, y2 = box
            iw, ih = x2 - x1, y2 - y1
        # These are in pt, so convert to px
        iw = iw * xdpi / 72.0
        ih = ih * ydpi / 72.0
    else:
        keeptrying = True
        if PILImage:
            try:
                img = PILImage.open(imgname)
                img.load()
                iw, ih = img.size
                xdpi, ydpi = img.info.get("dpi", (xdpi, ydpi))
                keeptrying = False
            except IOError:  # PIL throws this when it's a broken/unknown image
                pass
        if keeptrying:
            if extension not in ["jpg", "jpeg"]:
                log.error(
                    "The image (%s, %s) is broken or in an unknown format",
                    imgname,
                    nodeid(node),
                )
                raise ValueError
            else:
                # Can be handled by reportlab
                log.warning(
                    "Can't figure out size of the image (%s, %s). Install PIL for better results.",
                    imgname,
                    nodeid(node),
                )
                iw = 1000
                ih = 1000
    # Try to get the print resolution from the image itself via PIL.
    # If it fails, assume a DPI of 300, which is pretty much made up,
    # and then a 100% size would be iw*inch/300, so we pass
    # that as the second parameter to adjustUnits
    #
    # Some say the default DPI should be 72. That would mean
    # the largest printable image in A4 paper would be something
    # like 480x640. That would be awful.
    #
    w = node.get("width")
    h = node.get("height")
    if h is None and w is None:  # Nothing specified
        # Guess from iw, ih
        log.debug(
            "Using image %s without specifying size."
            "Calculating based on image size at %ddpi [%s]",
            imgname,
            xdpi,
            nodeid(node),
        )
        w = iw * inch / xdpi
        h = ih * inch / ydpi
    elif w is not None:
        # Node specifies only w
        # In this particular case, we want the default unit
        # to be pixels so we work like rst2html
        if w[-1] == "%":
            kind = "percentage_of_container"
            w = int(w[:-1])
        else:
            # This uses default DPI setting because we
            # are not using the image's "natural size"
            # this is what LaTeX does, according to the
            # docutils mailing list discussion
            w = client.styles.adjustUnits(w, client.styles.tw, default_unit="px")
        if h is None:
            # h is set from w with right aspect ratio
            h = w * ih / iw
        else:
            h = client.styles.adjustUnits(h, ih * inch / ydpi, default_unit="px")
    elif h is not None and w is None:
        if h[-1] != "%":
            h = client.styles.adjustUnits(h, ih * inch / ydpi, default_unit="px")
            # w is set from h with right aspect ratio
            w = h * iw / ih
        else:
            log.error(
                "Setting height as a percentage does **not** work. "
                "ignoring height parameter [%s]",
                nodeid(node),
            )
            # Set both from image data
            w = iw * inch / xdpi
            h = ih * inch / ydpi
    # Apply scale factor
    w = w * scale
    h = h * scale
    # And now we have this probably completely bogus size!
    log.info(
        "Image %s size calculated: %fcm by %fcm [%s]",
        imgname,
        w / cm,
        h / cm,
        nodeid(node),
    )
    return w, h, kind
|
def size_for_node(self, node, client):
    """Given a docutils image node, returns the size the image should have
    in the PDF document, and what 'kind' of size that is.
    That involves lots of guesswork.

    Returns a (width, height, kind) tuple; width/height are in points and
    kind is either "direct" or "percentage_of_container".
    """
    uri = str(node.get("uri"))
    if uri.split("://")[0].lower() not in ("http", "ftp", "https"):
        uri = os.path.join(client.basedir, uri)
    else:
        # BUG FIX: ``urllib.urlretrieve`` was removed in Python 3; use
        # ``urllib.request.urlretrieve`` (imported locally) instead.
        from urllib.request import urlretrieve

        uri, _ = urlretrieve(uri)
        client.to_unlink.append(uri)
    srcinfo = client, uri
    # Extract all the information from the URI
    imgname, extension, options = self.split_uri(uri)
    if not os.path.isfile(imgname):
        imgname = missing
    scale = float(node.get("scale", 100)) / 100
    # Figuring out the size to display of an image is ... annoying.
    # If the user provides a size with a unit, it's simple, adjustUnits
    # will return it in points and we're done.
    # However, often the unit wil be "%" (specially if it's meant for
    # HTML originally. In which case, we will use a percentage of
    # the containing frame.
    # Find the image size in pixels:
    kind = "direct"
    xdpi, ydpi = client.styles.def_dpi, client.styles.def_dpi
    extension = imgname.split(".")[-1].lower()
    if extension in ["svg", "svgz"]:
        from .svgimage import SVGImage

        iw, ih = SVGImage(imgname, srcinfo=srcinfo).wrap(0, 0)
        # These are in pt, so convert to px
        iw = iw * xdpi / 72
        ih = ih * ydpi / 72
    elif extension == "pdf":
        if VectorPdf is not None:
            xobj = VectorPdf.load_xobj(srcinfo)
            iw, ih = xobj.w, xobj.h
        else:
            # ``with`` closes the PDF file (previously leaked).
            with open(imgname, "rb") as f:
                reader = pdfinfo.PdfFileReader(f)
                box = [float(x) for x in reader.getPage(0)["/MediaBox"]]
            # BUG FIX: the MediaBox coordinates were never unpacked, so
            # x1/y1/x2/y2 raised NameError on this branch.
            x1, y1, x2, y2 = box
            iw, ih = x2 - x1, y2 - y1
        # These are in pt, so convert to px
        iw = iw * xdpi / 72.0
        ih = ih * ydpi / 72.0
    else:
        keeptrying = True
        if PILImage:
            try:
                img = PILImage.open(imgname)
                img.load()
                iw, ih = img.size
                xdpi, ydpi = img.info.get("dpi", (xdpi, ydpi))
                keeptrying = False
            except IOError:  # PIL throws this when it's a broken/unknown image
                pass
        if keeptrying:
            if extension not in ["jpg", "jpeg"]:
                log.error(
                    "The image (%s, %s) is broken or in an unknown format",
                    imgname,
                    nodeid(node),
                )
                raise ValueError
            else:
                # Can be handled by reportlab
                log.warning(
                    "Can't figure out size of the image (%s, %s). Install PIL for better results.",
                    imgname,
                    nodeid(node),
                )
                iw = 1000
                ih = 1000
    # Try to get the print resolution from the image itself via PIL.
    # If it fails, assume a DPI of 300, which is pretty much made up,
    # and then a 100% size would be iw*inch/300, so we pass
    # that as the second parameter to adjustUnits
    #
    # Some say the default DPI should be 72. That would mean
    # the largest printable image in A4 paper would be something
    # like 480x640. That would be awful.
    #
    w = node.get("width")
    h = node.get("height")
    if h is None and w is None:  # Nothing specified
        # Guess from iw, ih
        log.debug(
            "Using image %s without specifying size."
            "Calculating based on image size at %ddpi [%s]",
            imgname,
            xdpi,
            nodeid(node),
        )
        w = iw * inch / xdpi
        h = ih * inch / ydpi
    elif w is not None:
        # Node specifies only w
        # In this particular case, we want the default unit
        # to be pixels so we work like rst2html
        if w[-1] == "%":
            kind = "percentage_of_container"
            w = int(w[:-1])
        else:
            # This uses default DPI setting because we
            # are not using the image's "natural size"
            # this is what LaTeX does, according to the
            # docutils mailing list discussion
            w = client.styles.adjustUnits(w, client.styles.tw, default_unit="px")
        if h is None:
            # h is set from w with right aspect ratio
            h = w * ih / iw
        else:
            h = client.styles.adjustUnits(h, ih * inch / ydpi, default_unit="px")
    elif h is not None and w is None:
        if h[-1] != "%":
            h = client.styles.adjustUnits(h, ih * inch / ydpi, default_unit="px")
            # w is set from h with right aspect ratio
            w = h * iw / ih
        else:
            log.error(
                "Setting height as a percentage does **not** work. "
                "ignoring height parameter [%s]",
                nodeid(node),
            )
            # Set both from image data
            w = iw * inch / xdpi
            h = ih * inch / ydpi
    # Apply scale factor
    w = w * scale
    h = h * scale
    # And now we have this probably completely bogus size!
    log.info(
        "Image %s size calculated: %fcm by %fcm [%s]",
        imgname,
        w / cm,
        h / cm,
        nodeid(node),
    )
    return w, h, kind
|
https://github.com/rst2pdf/rst2pdf/issues/780
|
Traceback (most recent call last):
File "/home/sroeca/.pyenv/versions/test-rst-mzrgg/bin/rst2pdf", line 11, in <module>
load_entry_point('rst2pdf', 'console_scripts', 'rst2pdf')()
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/createpdf.py", line 1496, in main
compressed=options.compressed)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/createpdf.py", line 553, in createPdf
elements = self.gen_elements(self.doctree)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/basenodehandler.py", line 253, in elemdispatch
elements = self.getelements(client, node, style)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/basenodehandler.py", line 228, in getelements
elements = self.gather_elements(client, node, style)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/basenodehandler.py", line 208, in gather_elements
return client.gather_elements(node, style=style)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/createpdf.py", line 385, in gather_elements
r.extend(self.gen_elements(n, style=style))
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/basenodehandler.py", line 253, in elemdispatch
elements = self.getelements(client, node, style)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/basenodehandler.py", line 228, in getelements
elements = self.gather_elements(client, node, style)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/genelements.py", line 432, in gather_elements
elements = client.gather_elements(node)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/createpdf.py", line 385, in gather_elements
r.extend(self.gen_elements(n, style=style))
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/basenodehandler.py", line 253, in elemdispatch
elements = self.getelements(client, node, style)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/basenodehandler.py", line 228, in getelements
elements = self.gather_elements(client, node, style)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/genpdftext.py", line 158, in gather_elements
w, h, kind = MyImage.size_for_node(node, client=client)
File "/home/sroeca/src/sandbox/test-rst/rst2pdf/rst2pdf/image.py", line 223, in size_for_node
uri, _ = urllib.urlretrieve(uri)
AttributeError: module 'urllib' has no attribute 'urlretrieve'
|
AttributeError
|
def findTTFont(fname):
def get_family(query):
data = subprocess.check_output(["fc-match", query])
if six.PY2:
data = data.decode("UTF-8")
for line in data.splitlines():
line = line.strip()
if not line:
continue
fname, family, _, variant = line.split('"')[:4]
family = family.replace('"', "")
if family:
return family
return None
def get_fname(query):
data = subprocess.check_output(["fc-match", "-v", query])
if six.PY2:
data = data.decode("UTF-8")
for line in data.splitlines():
line = line.strip()
if line.startswith("file: "):
return line.split('"')[1]
return None
def get_variants(family):
variants = [
get_fname(family + ":style=Roman"),
get_fname(family + ":style=Bold"),
get_fname(family + ":style=Oblique"),
get_fname(family + ":style=Bold Oblique"),
]
if variants[2] == variants[0]:
variants[2] = get_fname(family + ":style=Italic")
if variants[3] == variants[0]:
variants[3] = get_fname(family + ":style=Bold Italic")
if variants[0].endswith(".pfb") or variants[0].endswith(".gz"):
return None
return variants
if os.name != "nt":
family = get_family(fname)
if not family:
log.error("Unknown font: %s", fname)
return None
return get_variants(family)
else:
# lookup required font in registry lookup, alternative approach
# is to let loadFont() traverse windows font directory or use
# ctypes with EnumFontFamiliesEx
def get_nt_fname(ftname):
import _winreg as _w
fontkey = _w.OpenKey(
_w.HKEY_LOCAL_MACHINE,
"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Fonts",
)
fontname = ftname + " (TrueType)"
try:
fname = _w.QueryValueEx(fontkey, fontname)[0]
if os.path.isabs(fname):
fontkey.close()
return fname
fontdir = os.environ.get("SystemRoot", "C:\\Windows")
fontdir += "\\Fonts"
fontkey.Close()
return fontdir + "\\" + fname
except WindowsError as err:
fontkey.Close()
return None
family, pos = guessFont(fname)
fontfile = get_nt_fname(fname)
if not fontfile:
if pos == 0:
fontfile = get_nt_fname(family)
elif pos == 1:
fontfile = get_nt_fname(family + " Bold")
elif pos == 2:
fontfile = get_nt_fname(family + " Italic") or get_nt_fname(
family + " Oblique"
)
else:
fontfile = get_nt_fname(family + " Bold Italic") or get_nt_fname(
family + " Bold Oblique"
)
if not fontfile:
log.error("Unknown font: %s", fname)
return None
family, pos = guessFont(fname)
variants = [
get_nt_fname(family) or fontfile,
get_nt_fname(family + " Bold") or fontfile,
get_nt_fname(family + " Italic")
or get_nt_fname(family + " Oblique")
or fontfile,
get_nt_fname(family + " Bold Italic")
or get_nt_fname(family + " Bold Oblique")
or fontfile,
]
return variants
|
def findTTFont(fname):
def get_family(query):
data = os.popen('fc-match "%s"' % query, "r").read()
for line in data.splitlines():
line = line.strip()
if not line:
continue
fname, family, _, variant = line.split('"')[:4]
family = family.replace('"', "")
if family:
return family
return None
def get_fname(query):
data = os.popen('fc-match -v "%s"' % query, "r").read()
for line in data.splitlines():
line = line.strip()
if line.startswith("file: "):
return line.split('"')[1]
return None
def get_variants(family):
variants = [
get_fname(family + ":style=Roman"),
get_fname(family + ":style=Bold"),
get_fname(family + ":style=Oblique"),
get_fname(family + ":style=Bold Oblique"),
]
if variants[2] == variants[0]:
variants[2] = get_fname(family + ":style=Italic")
if variants[3] == variants[0]:
variants[3] = get_fname(family + ":style=Bold Italic")
if variants[0].endswith(".pfb") or variants[0].endswith(".gz"):
return None
return variants
if os.name != "nt":
family = get_family(fname)
if not family:
log.error("Unknown font: %s", fname)
return None
return get_variants(family)
else:
# lookup required font in registry lookup, alternative approach
# is to let loadFont() traverse windows font directory or use
# ctypes with EnumFontFamiliesEx
def get_nt_fname(ftname):
import _winreg as _w
fontkey = _w.OpenKey(
_w.HKEY_LOCAL_MACHINE,
"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Fonts",
)
fontname = ftname + " (TrueType)"
try:
fname = _w.QueryValueEx(fontkey, fontname)[0]
if os.path.isabs(fname):
fontkey.close()
return fname
fontdir = os.environ.get("SystemRoot", "C:\\Windows")
fontdir += "\\Fonts"
fontkey.Close()
return fontdir + "\\" + fname
except WindowsError as err:
fontkey.Close()
return None
family, pos = guessFont(fname)
fontfile = get_nt_fname(fname)
if not fontfile:
if pos == 0:
fontfile = get_nt_fname(family)
elif pos == 1:
fontfile = get_nt_fname(family + " Bold")
elif pos == 2:
fontfile = get_nt_fname(family + " Italic") or get_nt_fname(
family + " Oblique"
)
else:
fontfile = get_nt_fname(family + " Bold Italic") or get_nt_fname(
family + " Bold Oblique"
)
if not fontfile:
log.error("Unknown font: %s", fname)
return None
family, pos = guessFont(fname)
variants = [
get_nt_fname(family) or fontfile,
get_nt_fname(family + " Bold") or fontfile,
get_nt_fname(family + " Italic")
or get_nt_fname(family + " Oblique")
or fontfile,
get_nt_fname(family + " Bold Italic")
or get_nt_fname(family + " Bold Oblique")
or fontfile,
]
return variants
|
https://github.com/rst2pdf/rst2pdf/issues/757
|
$ cd doc
$ rst2pdf manual.rst --custom-cover=assets/cover.tmpl -o output/pdf/manual.pdf -s assets/manual.style -b1
Traceback (most recent call last):
File "/Users/rob/.pyenv/versions/rst2pdf/bin/rst2pdf", line 11, in <module>
load_entry_point('rst2pdf', 'console_scripts', 'rst2pdf')()
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/createpdf.py", line 1506, in main
strip_elements_with_classes=options.strip_elements_with_classes,
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/createpdf.py", line 184, in __init__
self.loadStyles(stylesheets)
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/createpdf.py", line 288, in loadStyles
def_dpi=self.def_dpi)
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/styles.py", line 358, in __init__
fontList = findfonts.autoEmbed(style[key])
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/findfonts.py", line 316, in autoEmbed
variants = findTTFont(fname)
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/findfonts.py", line 221, in findTTFont
return get_variants(family)
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/findfonts.py", line 204, in get_variants
get_fname(family + ":style=Roman"),
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/findfonts.py", line 198, in get_fname
if line.startswith("file: "):
UnicodeDecodeError: 'ascii' codec can't decode byte 0xc4 in position 36: ordinal not in range(128)
|
UnicodeDecodeError
|
def get_family(query):
data = subprocess.check_output(["fc-match", query])
if six.PY2:
data = data.decode("UTF-8")
for line in data.splitlines():
line = line.strip()
if not line:
continue
fname, family, _, variant = line.split('"')[:4]
family = family.replace('"', "")
if family:
return family
return None
|
def get_family(query):
data = os.popen('fc-match "%s"' % query, "r").read()
for line in data.splitlines():
line = line.strip()
if not line:
continue
fname, family, _, variant = line.split('"')[:4]
family = family.replace('"', "")
if family:
return family
return None
|
https://github.com/rst2pdf/rst2pdf/issues/757
|
$ cd doc
$ rst2pdf manual.rst --custom-cover=assets/cover.tmpl -o output/pdf/manual.pdf -s assets/manual.style -b1
Traceback (most recent call last):
File "/Users/rob/.pyenv/versions/rst2pdf/bin/rst2pdf", line 11, in <module>
load_entry_point('rst2pdf', 'console_scripts', 'rst2pdf')()
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/createpdf.py", line 1506, in main
strip_elements_with_classes=options.strip_elements_with_classes,
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/createpdf.py", line 184, in __init__
self.loadStyles(stylesheets)
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/createpdf.py", line 288, in loadStyles
def_dpi=self.def_dpi)
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/styles.py", line 358, in __init__
fontList = findfonts.autoEmbed(style[key])
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/findfonts.py", line 316, in autoEmbed
variants = findTTFont(fname)
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/findfonts.py", line 221, in findTTFont
return get_variants(family)
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/findfonts.py", line 204, in get_variants
get_fname(family + ":style=Roman"),
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/findfonts.py", line 198, in get_fname
if line.startswith("file: "):
UnicodeDecodeError: 'ascii' codec can't decode byte 0xc4 in position 36: ordinal not in range(128)
|
UnicodeDecodeError
|
def get_fname(query):
data = subprocess.check_output(["fc-match", "-v", query])
if six.PY2:
data = data.decode("UTF-8")
for line in data.splitlines():
line = line.strip()
if line.startswith("file: "):
return line.split('"')[1]
return None
|
def get_fname(query):
data = os.popen('fc-match -v "%s"' % query, "r").read()
for line in data.splitlines():
line = line.strip()
if line.startswith("file: "):
return line.split('"')[1]
return None
|
https://github.com/rst2pdf/rst2pdf/issues/757
|
$ cd doc
$ rst2pdf manual.rst --custom-cover=assets/cover.tmpl -o output/pdf/manual.pdf -s assets/manual.style -b1
Traceback (most recent call last):
File "/Users/rob/.pyenv/versions/rst2pdf/bin/rst2pdf", line 11, in <module>
load_entry_point('rst2pdf', 'console_scripts', 'rst2pdf')()
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/createpdf.py", line 1506, in main
strip_elements_with_classes=options.strip_elements_with_classes,
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/createpdf.py", line 184, in __init__
self.loadStyles(stylesheets)
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/createpdf.py", line 288, in loadStyles
def_dpi=self.def_dpi)
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/styles.py", line 358, in __init__
fontList = findfonts.autoEmbed(style[key])
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/findfonts.py", line 316, in autoEmbed
variants = findTTFont(fname)
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/findfonts.py", line 221, in findTTFont
return get_variants(family)
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/findfonts.py", line 204, in get_variants
get_fname(family + ":style=Roman"),
File "/Users/rob/Projects/python/rst2pdf/rst2pdf/findfonts.py", line 198, in get_fname
if line.startswith("file: "):
UnicodeDecodeError: 'ascii' codec can't decode byte 0xc4 in position 36: ordinal not in range(128)
|
UnicodeDecodeError
|
def get_pre_post(self, client, node, replaceEnt):
pre = ""
if node["ids"]:
if node["ids"][0] not in client.targets:
pre = '<a name="%s"/>' % node["ids"][0]
client.targets.append(node["ids"][0])
else:
pre = '<a name="%s"/>' % node["refuri"]
client.targets.append(node["refuri"])
return pre, ""
|
def get_pre_post(self, client, node, replaceEnt):
pre = ""
if node["ids"][0] not in client.targets:
pre = '<a name="%s"/>' % node["ids"][0]
client.targets.append(node["ids"][0])
return pre, ""
|
https://github.com/rst2pdf/rst2pdf/issues/569
|
Traceback (most recent call last):
File "/usr/bin/rst2pdf", line 9, in <module>
load_entry_point('rst2pdf===0.93.dev-r0', 'console_scripts', 'rst2pdf')()
File "/usr/lib/python2.7/site-packages/rst2pdf/createpdf.py", line 1495, in main
compressed=options.compressed)
File "/usr/lib/python2.7/site-packages/rst2pdf/createpdf.py", line 561, in createPdf
elements = self.gen_elements(self.doctree)
File "/usr/lib/python2.7/site-packages/rst2pdf/basenodehandler.py", line 250, in elemdispatch
elements = self.getelements(client, node, style)
File "/usr/lib/python2.7/site-packages/rst2pdf/basenodehandler.py", line 225, in getelements
elements = self.gather_elements(client, node, style)
File "/usr/lib/python2.7/site-packages/rst2pdf/basenodehandler.py", line 205, in gather_elements
return client.gather_elements(node, style=style)
File "/usr/lib/python2.7/site-packages/rst2pdf/createpdf.py", line 390, in gather_elements
r.extend(self.gen_elements(n, style=style))
File "/usr/lib/python2.7/site-packages/rst2pdf/basenodehandler.py", line 250, in elemdispatch
elements = self.getelements(client, node, style)
File "/usr/lib/python2.7/site-packages/rst2pdf/basenodehandler.py", line 225, in getelements
elements = self.gather_elements(client, node, style)
File "/usr/lib/python2.7/site-packages/rst2pdf/genelements.py", line 188, in gather_elements
return [Paragraph(client.gen_pdftext(node), style)]
File "/usr/lib/python2.7/site-packages/rst2pdf/basenodehandler.py", line 280, in textdispatch
text = self.get_text(client, node, replaceEnt)
File "/usr/lib/python2.7/site-packages/rst2pdf/basenodehandler.py", line 266, in get_text
return client.gather_pdftext(node)
File "/usr/lib/python2.7/site-packages/rst2pdf/createpdf.py", line 380, in gather_pdftext
for n in node.children])
File "/usr/lib/python2.7/site-packages/rst2pdf/basenodehandler.py", line 279, in textdispatch
pre, post = self.get_pre_post(client, node, replaceEnt)
File "/usr/lib/python2.7/site-packages/rst2pdf/genpdftext.py", line 225, in get_pre_post
if node['ids'][0] not in client.targets:
IndexError: list index out of range
|
IndexError
|
def get_text(self, client, node, replaceEnt):
# get style for current node
sty = client.styles.styleForNode(node)
node_fontsize = sty.fontSize
node_color = "#" + sty.textColor.hexval()[2:]
mf = math_flowable.Math(
node.math_data, label=node.label, fontsize=node_fontsize, color=node_color
)
w, h = mf.wrap(0, 0)
descent = mf.descent()
img = mf.genImage()
client.to_unlink.append(img)
return '<img src="%s" width="%f" height="%f" valign="%f"/>' % (img, w, h, -descent)
|
def get_text(self, client, node, replaceEnt):
# get style for current node
sty = client.styles.styleForNode(node)
node_fontsize = sty.fontSize
node_color = "#" + sty.textColor.hexval()[2:]
mf = math_flowable.Math(
node.math_data, label=node.label, fontsize=node_fontsize, color=node_color
)
w, h = mf.wrap(0, 0)
descent = mf.descent()
img = mf.genImage()
client.to_unlink.append(img)
return '<img src="%s" width=%f height=%f valign=%f/>' % (img, w, h, -descent)
|
https://github.com/rst2pdf/rst2pdf/issues/517
|
What steps will reproduce the problem?
1. echo "This is :math:\`\\pi\`" > test.rst
2. rst2pdf test.rst -o test.pdf
What is the expected output? I expect a pdf with the symbol pi
What do you see instead? a python "ValueError" (see the stack below)
Please answer the following questions, they are *very* important:
* Version of rst2pdf?
0.93.dev
* Version of reportlab?
from python, reportlab.__version__ is ' $Id$ '
and the egg says reportlab-3.1.8-py2.7-macosx-10.6-intel.egg
* Are you using wordaxe, and if true, what version?
don't think so
* Are you using rst2pdf with Sphinx, and if true, what version?
No
Please provide any additional information below.
When using inline math role, I get a python error, thus no pdf output. Because
I don't understand it, I have no idea what test to do.
I paste the error stack below. I you have some idea of test to do, please tell
me.
stack:
rst2pdf test.rst -o test.pdf
Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/2.7/bin/rst2pdf", line 9, in <module>
load_entry_point('rst2pdf==0.93.dev', 'console_scripts', 'rst2pdf')()
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/rst2pdf-0.93.dev-py2.7.egg/rst2pdf/createpdf.py", line 1495, in main
compressed=options.compressed)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/rst2pdf-0.93.dev-py2.7.egg/rst2pdf/createpdf.py", line 561, in createPdf
elements = self.gen_elements(self.doctree)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/rst2pdf-0.93.dev-py2.7.egg/rst2pdf/basenodehandler.py", line 250, in elemdispatch
elements = self.getelements(client, node, style)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/rst2pdf-0.93.dev-py2.7.egg/rst2pdf/basenodehandler.py", line 225, in getelements
elements = self.gather_elements(client, node, style)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/rst2pdf-0.93.dev-py2.7.egg/rst2pdf/basenodehandler.py", line 205, in gather_elements
return client.gather_elements(node, style=style)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/rst2pdf-0.93.dev-py2.7.egg/rst2pdf/createpdf.py", line 390, in gather_elements
r.extend(self.gen_elements(n, style=style))
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/rst2pdf-0.93.dev-py2.7.egg/rst2pdf/basenodehandler.py", line 250, in elemdispatch
elements = self.getelements(client, node, style)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/rst2pdf-0.93.dev-py2.7.egg/rst2pdf/basenodehandler.py", line 225, in getelements
elements = self.gather_elements(client, node, style)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/rst2pdf-0.93.dev-py2.7.egg/rst2pdf/genelements.py", line 188, in gather_elements
return [Paragraph(client.gen_pdftext(node), style)]
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/reportlab-3.1.8-py2.7-macosx-10.6-intel.egg/reportlab/platypus/paragraph.py", line 987, in __init__
self._setup(text, style, bulletText or getattr(style,'bulletText',None), frags, cleanBlockQuotedText)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/reportlab-3.1.8-py2.7-macosx-10.6-intel.egg/reportlab/platypus/paragraph.py", line 1012, in _setup
% (_parser.errors[0],text[:min(30,len(text))]))
ValueError: xml parser error (valign: invalid value -1.000000/) in paragraph
beginning
'This is <img src="/var/folders'
|
ValueError
|
def Do(
self,
input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any],
) -> None:
"""TensorFlow Transform executor entrypoint.
This implements BaseExecutor.Do() and is invoked by orchestration systems.
This is not inteded for manual usage or further customization. Please use
the Transform() function which takes an input format with no artifact
dependency.
Args:
input_dict: Input dict from input key to a list of artifacts, including:
- input_data: A list of type `standard_artifacts.Examples` which should
contain custom splits specified in splits_config. If custom split is
not provided, this should contain two splits 'train' and 'eval'.
- schema: A list of type `standard_artifacts.Schema` which should
contain a single schema artifact.
- analyzer_cache: Cache input of 'tf.Transform', where cached
information for analyzed examples from previous runs will be read.
output_dict: Output dict from key to a list of artifacts, including:
- transform_output: Output of 'tf.Transform', which includes an exported
Tensorflow graph suitable for both training and serving;
- transformed_examples: Materialized transformed examples, which
includes transform splits as specified in splits_config. If custom
split is not provided, this should include both 'train' and 'eval'
splits.
- updated_analyzer_cache: Cache output of 'tf.Transform', where
cached information for analyzed examples will be written.
exec_properties: A dict of execution properties, including:
- module_file: The file path to a python module file, from which the
'preprocessing_fn' function will be loaded.
- preprocessing_fn: The module path to a python function that
implements 'preprocessing_fn'. Exactly one of 'module_file' and
'preprocessing_fn' should be set.
- splits_config: A transform_pb2.SplitsConfig instance, providing splits
that should be analyzed and splits that should be transformed. Note
analyze and transform splits can have overlap. Default behavior (when
splits_config is not set) is analyze the 'train' split and transform
all splits. If splits_config is set, analyze cannot be empty.
Returns:
None
"""
self._log_startup(input_dict, output_dict, exec_properties)
splits_config = transform_pb2.SplitsConfig()
if exec_properties.get("splits_config", None):
json_format.Parse(exec_properties["splits_config"], splits_config)
if not splits_config.analyze:
raise ValueError("analyze cannot be empty when splits_config is set.")
else:
splits_config.analyze.append("train")
# All input artifacts should have the same set of split names.
split_names = artifact_utils.decode_split_names(
input_dict[EXAMPLES_KEY][0].split_names
)
split_names_set = set(split_names)
for artifact in input_dict[EXAMPLES_KEY]:
artifact_split_names = artifact_utils.decode_split_names(
artifact.split_names
)
if split_names_set != set(artifact_split_names):
raise ValueError(
"Not all input artifacts have the same split names: (%s, %s)"
% (split_names, artifact_split_names)
)
splits_config.transform.extend(split_names)
absl.logging.info(
"Analyze the 'train' split and transform all splits when "
"splits_config is not set."
)
payload_format, data_view_uri = (
tfxio_utils.resolve_payload_format_and_data_view_uri(input_dict[EXAMPLES_KEY])
)
schema_file = io_utils.get_only_uri_in_dir(
artifact_utils.get_single_uri(input_dict[SCHEMA_KEY])
)
transform_output = artifact_utils.get_single_uri(output_dict[TRANSFORM_GRAPH_KEY])
temp_path = os.path.join(transform_output, _TEMP_DIR_IN_TRANSFORM_OUTPUT)
absl.logging.debug("Using temp path %s for tft.beam", temp_path)
analyze_data_paths = []
for split in splits_config.analyze:
data_uris = artifact_utils.get_split_uris(input_dict[EXAMPLES_KEY], split)
for data_uri in data_uris:
analyze_data_paths.append(io_utils.all_files_pattern(data_uri))
transform_data_paths = []
materialize_output_paths = []
if output_dict.get(TRANSFORMED_EXAMPLES_KEY) is not None:
for transformed_example_artifact in output_dict[TRANSFORMED_EXAMPLES_KEY]:
transformed_example_artifact.split_names = (
artifact_utils.encode_split_names(list(splits_config.transform))
)
for split in splits_config.transform:
data_uris = artifact_utils.get_split_uris(input_dict[EXAMPLES_KEY], split)
for data_uri in data_uris:
transform_data_paths.append(io_utils.all_files_pattern(data_uri))
transformed_example_uris = artifact_utils.get_split_uris(
output_dict[TRANSFORMED_EXAMPLES_KEY], split
)
for output_uri in transformed_example_uris:
materialize_output_paths.append(
os.path.join(output_uri, _DEFAULT_TRANSFORMED_EXAMPLES_PREFIX)
)
def _GetCachePath(label, params_dict):
if params_dict.get(label) is None:
return None
else:
return artifact_utils.get_single_uri(params_dict[label])
label_inputs = {
labels.COMPUTE_STATISTICS_LABEL: False,
labels.SCHEMA_PATH_LABEL: schema_file,
labels.EXAMPLES_DATA_FORMAT_LABEL: payload_format,
labels.DATA_VIEW_LABEL: data_view_uri,
labels.ANALYZE_DATA_PATHS_LABEL: analyze_data_paths,
labels.ANALYZE_PATHS_FILE_FORMATS_LABEL: [labels.FORMAT_TFRECORD]
* len(analyze_data_paths),
labels.TRANSFORM_DATA_PATHS_LABEL: transform_data_paths,
labels.TRANSFORM_PATHS_FILE_FORMATS_LABEL: [labels.FORMAT_TFRECORD]
* len(transform_data_paths),
labels.MODULE_FILE: exec_properties.get("module_file", None),
labels.PREPROCESSING_FN: exec_properties.get("preprocessing_fn", None),
labels.CUSTOM_CONFIG: exec_properties.get("custom_config", None),
labels.FORCE_TF_COMPAT_V1_LABEL: True,
}
cache_input = _GetCachePath(ANALYZER_CACHE_KEY, input_dict)
if cache_input is not None:
label_inputs[labels.CACHE_INPUT_PATH_LABEL] = cache_input
label_outputs = {
labels.TRANSFORM_METADATA_OUTPUT_PATH_LABEL: transform_output,
labels.TRANSFORM_MATERIALIZE_OUTPUT_PATHS_LABEL: materialize_output_paths,
labels.TEMP_OUTPUT_LABEL: str(temp_path),
}
cache_output = _GetCachePath(UPDATED_ANALYZER_CACHE_KEY, output_dict)
if cache_output is not None:
label_outputs[labels.CACHE_OUTPUT_PATH_LABEL] = cache_output
status_file = "status_file" # Unused
self.Transform(label_inputs, label_outputs, status_file)
absl.logging.debug("Cleaning up temp path %s on executor success", temp_path)
io_utils.delete_dir(temp_path)
|
def Do(
self,
input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any],
) -> None:
"""TensorFlow Transform executor entrypoint.
This implements BaseExecutor.Do() and is invoked by orchestration systems.
This is not inteded for manual usage or further customization. Please use
the Transform() function which takes an input format with no artifact
dependency.
Args:
input_dict: Input dict from input key to a list of artifacts, including:
- input_data: A list of type `standard_artifacts.Examples` which should
contain custom splits specified in splits_config. If custom split is
not provided, this should contain two splits 'train' and 'eval'.
- schema: A list of type `standard_artifacts.Schema` which should
contain a single schema artifact.
- analyzer_cache: Cache input of 'tf.Transform', where cached
information for analyzed examples from previous runs will be read.
output_dict: Output dict from key to a list of artifacts, including:
- transform_output: Output of 'tf.Transform', which includes an exported
Tensorflow graph suitable for both training and serving;
- transformed_examples: Materialized transformed examples, which
includes transform splits as specified in splits_config. If custom
split is not provided, this should include both 'train' and 'eval'
splits.
- updated_analyzer_cache: Cache output of 'tf.Transform', where
cached information for analyzed examples will be written.
exec_properties: A dict of execution properties, including:
- module_file: The file path to a python module file, from which the
'preprocessing_fn' function will be loaded.
- preprocessing_fn: The module path to a python function that
implements 'preprocessing_fn'. Exactly one of 'module_file' and
'preprocessing_fn' should be set.
- splits_config: A transform_pb2.SplitsConfig instance, providing splits
that should be analyzed and splits that should be transformed. Note
analyze and transform splits can have overlap. Default behavior (when
splits_config is not set) is analyze the 'train' split and transform
all splits. If splits_config is set, analyze cannot be empty.
Returns:
None
"""
self._log_startup(input_dict, output_dict, exec_properties)
splits_config = transform_pb2.SplitsConfig()
if exec_properties.get("splits_config", None):
json_format.Parse(exec_properties["splits_config"], splits_config)
if not splits_config.analyze:
raise ValueError("analyze cannot be empty when splits_config is set.")
else:
splits_config.analyze.append("train")
# All input artifacts should have the same set of split names.
split_names = artifact_utils.decode_split_names(
input_dict[EXAMPLES_KEY][0].split_names
)
split_names_set = set(split_names)
for artifact in input_dict[EXAMPLES_KEY]:
artifact_split_names = artifact_utils.decode_split_names(
artifact.split_names
)
if split_names_set != set(artifact_split_names):
raise ValueError(
"Not all input artifacts have the same split names: (%s, %s)"
% (split_names, artifact_split_names)
)
splits_config.transform.extend(split_names)
absl.logging.info(
"Analyze the 'train' split and transform all splits when "
"splits_config is not set."
)
payload_format, data_view_uri = (
tfxio_utils.resolve_payload_format_and_data_view_uri(input_dict[EXAMPLES_KEY])
)
schema_file = io_utils.get_only_uri_in_dir(
artifact_utils.get_single_uri(input_dict[SCHEMA_KEY])
)
transform_output = artifact_utils.get_single_uri(output_dict[TRANSFORM_GRAPH_KEY])
temp_path = os.path.join(transform_output, _TEMP_DIR_IN_TRANSFORM_OUTPUT)
absl.logging.debug("Using temp path %s for tft.beam", temp_path)
analyze_data_paths = []
for split in splits_config.analyze:
data_uris = artifact_utils.get_split_uris(input_dict[EXAMPLES_KEY], split)
for data_uri in data_uris:
analyze_data_paths.append(io_utils.all_files_pattern(data_uri))
transform_data_paths = []
materialize_output_paths = []
if output_dict.get(TRANSFORMED_EXAMPLES_KEY) is not None:
for transformed_example_artifact in output_dict[TRANSFORMED_EXAMPLES_KEY]:
transformed_example_artifact.split_names = (
artifact_utils.encode_split_names(list(splits_config.transform))
)
for split in splits_config.transform:
data_uris = artifact_utils.get_split_uris(input_dict[EXAMPLES_KEY], split)
for data_uri in data_uris:
transform_data_paths.append(io_utils.all_files_pattern(data_uri))
transformed_example_uris = artifact_utils.get_split_uris(
output_dict[TRANSFORMED_EXAMPLES_KEY], split
)
for output_uri in transformed_example_uris:
materialize_output_paths.append(
os.path.join(output_uri, _DEFAULT_TRANSFORMED_EXAMPLES_PREFIX)
)
def _GetCachePath(label, params_dict):
if params_dict.get(label) is None:
return None
else:
return artifact_utils.get_single_uri(params_dict[label])
label_inputs = {
labels.COMPUTE_STATISTICS_LABEL: False,
labels.SCHEMA_PATH_LABEL: schema_file,
labels.EXAMPLES_DATA_FORMAT_LABEL: payload_format,
labels.DATA_VIEW_LABEL: data_view_uri,
labels.ANALYZE_DATA_PATHS_LABEL: analyze_data_paths,
labels.ANALYZE_PATHS_FILE_FORMATS_LABEL: [labels.FORMAT_TFRECORD]
* len(analyze_data_paths),
labels.TRANSFORM_DATA_PATHS_LABEL: transform_data_paths,
labels.TRANSFORM_PATHS_FILE_FORMATS_LABEL: [labels.FORMAT_TFRECORD]
* len(transform_data_paths),
labels.MODULE_FILE: exec_properties.get("module_file", None),
labels.PREPROCESSING_FN: exec_properties.get("preprocessing_fn", None),
labels.CUSTOM_CONFIG: exec_properties.get("custom_config", None),
}
cache_input = _GetCachePath(ANALYZER_CACHE_KEY, input_dict)
if cache_input is not None:
label_inputs[labels.CACHE_INPUT_PATH_LABEL] = cache_input
label_outputs = {
labels.TRANSFORM_METADATA_OUTPUT_PATH_LABEL: transform_output,
labels.TRANSFORM_MATERIALIZE_OUTPUT_PATHS_LABEL: materialize_output_paths,
labels.TEMP_OUTPUT_LABEL: str(temp_path),
}
cache_output = _GetCachePath(UPDATED_ANALYZER_CACHE_KEY, output_dict)
if cache_output is not None:
label_outputs[labels.CACHE_OUTPUT_PATH_LABEL] = cache_output
status_file = "status_file" # Unused
self.Transform(label_inputs, label_outputs, status_file)
absl.logging.debug("Cleaning up temp path %s on executor success", temp_path)
io_utils.delete_dir(temp_path)
|
https://github.com/tensorflow/tfx/issues/1665
|
RuntimeError: Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/apache_beam/runners/worker/sdk_worker.py", line 312, in get
processor = self.cached_bundle_processors[bundle_descriptor_id].pop()
IndexError: pop from empty list
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/apache_beam/internal/pickler.py", line 250, in dumps
s = dill.dumps(o)
...
File "/usr/local/lib/python3.6/dist-packages/apache_beam/internal/pickler.py", line 77, in _is_nested_class
and cls.__name__ not in sys.modules[cls.__module__].__dict__)
KeyError: 'user_module'
During handling of the above exception, another exception occurred:
|
IndexError
|
def __init__(
self,
input_cache_dir: Text,
output_cache_dir: Text,
analyze_data_list: List[_Dataset],
typespecs: Mapping[Text, tf.TypeSpec],
preprocessing_fn: Any,
cache_source: beam.PTransform,
force_tf_compat_v1: bool,
):
# pyformat: enable
self._input_cache_dir = input_cache_dir
self._output_cache_dir = output_cache_dir
self._analyze_data_list = analyze_data_list
self._feature_spec_or_typespec = typespecs
self._preprocessing_fn = preprocessing_fn
self._cache_source = cache_source
self._force_tf_compat_v1 = force_tf_compat_v1
|
def __init__(
self,
input_cache_dir: Text,
output_cache_dir: Text,
analyze_data_list: List[_Dataset],
typespecs: Mapping[Text, tf.TypeSpec],
preprocessing_fn: Any,
cache_source: beam.PTransform,
):
# pyformat: enable
self._input_cache_dir = input_cache_dir
self._output_cache_dir = output_cache_dir
self._analyze_data_list = analyze_data_list
self._feature_spec_or_typespec = typespecs
self._preprocessing_fn = preprocessing_fn
self._cache_source = cache_source
|
https://github.com/tensorflow/tfx/issues/1665
|
RuntimeError: Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/apache_beam/runners/worker/sdk_worker.py", line 312, in get
processor = self.cached_bundle_processors[bundle_descriptor_id].pop()
IndexError: pop from empty list
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/apache_beam/internal/pickler.py", line 250, in dumps
s = dill.dumps(o)
...
File "/usr/local/lib/python3.6/dist-packages/apache_beam/internal/pickler.py", line 77, in _is_nested_class
and cls.__name__ not in sys.modules[cls.__module__].__dict__)
KeyError: 'user_module'
During handling of the above exception, another exception occurred:
|
IndexError
|
def expand(
    self, pipeline
) -> Tuple[
    Dict[Text, Optional[_Dataset]],
    Optional[Dict[Text, Dict[Text, beam.pvalue.PCollection]]],
]:
    """Computes the per-dataset analysis plan and reads the input cache.
    Returns:
      A tuple of:
        - A dict mapping each dataset key to its _Dataset, or to None when
          that dataset's analysis results are fully covered by cache.
        - The input cache read from self._input_cache_dir, {} when only
          writing cache, or None when caching is disabled entirely.
    """
    dataset_keys_list = [dataset.dataset_key for dataset in self._analyze_data_list]
    # TODO(b/37788560): Remove this restriction when a greater number of
    # stages can be handled efficiently.
    cache_entry_keys = tft_beam.analysis_graph_builder.get_analysis_cache_entry_keys(
        self._preprocessing_fn,
        self._feature_spec_or_typespec,
        dataset_keys_list,
        self._force_tf_compat_v1,
    )
    # We estimate the number of stages in the pipeline to be roughly:
    # analyzers * analysis_paths * 10.
    if (
        len(cache_entry_keys) * len(dataset_keys_list) * 10
        > _MAX_ESTIMATED_STAGES_COUNT
    ):
        absl.logging.warning(
            "Disabling cache because otherwise the number of stages might be "
            "too high ({} analyzers, {} analysis paths)".format(
                len(cache_entry_keys), len(dataset_keys_list)
            )
        )
        # Returning None as the input cache here disables both input and output
        # cache.
        return ({d.dataset_key: d for d in self._analyze_data_list}, None)
    if self._input_cache_dir is not None:
        absl.logging.info(
            "Reading the following analysis cache entry keys: %s", cache_entry_keys
        )
        input_cache = pipeline | "ReadCache" >> analyzer_cache.ReadAnalysisCacheFromFS(
            self._input_cache_dir,
            dataset_keys_list,
            source=self._cache_source,
            cache_entry_keys=cache_entry_keys,
        )
    elif self._output_cache_dir is not None:
        # Write-only cache mode: start with an empty cache dict.
        input_cache = {}
    else:
        # Using None here to indicate that this pipeline will not read or write
        # cache.
        input_cache = None
    if input_cache is None:
        # Cache is disabled so we won't be filtering out any datasets, and will
        # always perform a flatten over all of them.
        filtered_analysis_dataset_keys = dataset_keys_list
    else:
        filtered_analysis_dataset_keys = (
            tft_beam.analysis_graph_builder.get_analysis_dataset_keys(
                self._preprocessing_fn,
                self._feature_spec_or_typespec,
                dataset_keys_list,
                input_cache,
                self._force_tf_compat_v1,
            )
        )
    # Datasets whose analysis results are fully cached map to None so that
    # downstream code can skip reading them.
    new_analyze_data_dict = {}
    for dataset in self._analyze_data_list:
        if dataset.dataset_key in filtered_analysis_dataset_keys:
            new_analyze_data_dict[dataset.dataset_key] = dataset
        else:
            new_analyze_data_dict[dataset.dataset_key] = None
    return (new_analyze_data_dict, input_cache)
|
def expand(
    self, pipeline
) -> Tuple[
    Dict[Text, Optional[_Dataset]],
    Optional[Dict[Text, Dict[Text, beam.pvalue.PCollection]]],
]:
    """Computes the per-dataset analysis plan and reads the input cache.
    Returns:
      A tuple of:
        - A dict mapping each dataset key to its _Dataset, or to None when
          that dataset's analysis results are fully covered by cache.
        - The input cache read from self._input_cache_dir, {} when only
          writing cache, or None when caching is disabled entirely.
    """
    dataset_keys_list = [dataset.dataset_key for dataset in self._analyze_data_list]
    # TODO(b/37788560): Remove this restriction when a greater number of
    # stages can be handled efficiently.
    cache_entry_keys = tft_beam.analysis_graph_builder.get_analysis_cache_entry_keys(
        self._preprocessing_fn, self._feature_spec_or_typespec, dataset_keys_list
    )
    # We estimate the number of stages in the pipeline to be roughly:
    # analyzers * analysis_paths * 10.
    if (
        len(cache_entry_keys) * len(dataset_keys_list) * 10
        > _MAX_ESTIMATED_STAGES_COUNT
    ):
        absl.logging.warning(
            "Disabling cache because otherwise the number of stages might be "
            "too high ({} analyzers, {} analysis paths)".format(
                len(cache_entry_keys), len(dataset_keys_list)
            )
        )
        # Returning None as the input cache here disables both input and output
        # cache.
        return ({d.dataset_key: d for d in self._analyze_data_list}, None)
    if self._input_cache_dir is not None:
        absl.logging.info(
            "Reading the following analysis cache entry keys: %s", cache_entry_keys
        )
        input_cache = pipeline | "ReadCache" >> analyzer_cache.ReadAnalysisCacheFromFS(
            self._input_cache_dir,
            dataset_keys_list,
            source=self._cache_source,
            cache_entry_keys=cache_entry_keys,
        )
    elif self._output_cache_dir is not None:
        # Write-only cache mode: start with an empty cache dict.
        input_cache = {}
    else:
        # Using None here to indicate that this pipeline will not read or write
        # cache.
        input_cache = None
    if input_cache is None:
        # Cache is disabled so we won't be filtering out any datasets, and will
        # always perform a flatten over all of them.
        filtered_analysis_dataset_keys = dataset_keys_list
    else:
        filtered_analysis_dataset_keys = (
            tft_beam.analysis_graph_builder.get_analysis_dataset_keys(
                self._preprocessing_fn,
                self._feature_spec_or_typespec,
                dataset_keys_list,
                input_cache,
            )
        )
    # Datasets whose analysis results are fully cached map to None so that
    # downstream code can skip reading them.
    new_analyze_data_dict = {}
    for dataset in self._analyze_data_list:
        if dataset.dataset_key in filtered_analysis_dataset_keys:
            new_analyze_data_dict[dataset.dataset_key] = dataset
        else:
            new_analyze_data_dict[dataset.dataset_key] = None
    return (new_analyze_data_dict, input_cache)
|
https://github.com/tensorflow/tfx/issues/1665
|
RuntimeError: Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/apache_beam/runners/worker/sdk_worker.py", line 312, in get
processor = self.cached_bundle_processors[bundle_descriptor_id].pop()
IndexError: pop from empty list
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/apache_beam/internal/pickler.py", line 250, in dumps
s = dill.dumps(o)
...
File "/usr/local/lib/python3.6/dist-packages/apache_beam/internal/pickler.py", line 77, in _is_nested_class
and cls.__name__ not in sys.modules[cls.__module__].__dict__)
KeyError: 'user_module'
During handling of the above exception, another exception occurred:
|
IndexError
|
def Transform(
    self, inputs: Mapping[Text, Any], outputs: Mapping[Text, Any], status_file: Text
) -> None:
    """Executes on request.
    This is the implementation part of transform executor. This is intended for
    using or extending the executor without artifact dependency.
    Args:
      inputs: A dictionary of labelled input values, including:
        - labels.COMPUTE_STATISTICS_LABEL: Whether compute statistics.
        - labels.SCHEMA_PATH_LABEL: Path to schema file.
        - labels.EXAMPLES_DATA_FORMAT_LABEL: Example data format, one of the
          enums from example_gen_pb2.PayloadFormat.
        - labels.ANALYZE_DATA_PATHS_LABEL: Paths or path patterns to analyze
          data.
        - labels.ANALYZE_PATHS_FILE_FORMATS_LABEL: File formats of paths to
          analyze data.
        - labels.TRANSFORM_DATA_PATHS_LABEL: Paths or path patterns to transform
          data.
        - labels.TRANSFORM_PATHS_FILE_FORMATS_LABEL: File formats of paths to
          transform data.
        - labels.MODULE_FILE: Path to a Python module that contains the
          preprocessing_fn, optional.
        - labels.PREPROCESSING_FN: Path to a Python function that implements
          preprocessing_fn, optional.
        - labels.CUSTOM_CONFIG: Dictionary of additional parameters for
          preprocessing_fn, optional.
        - labels.DATA_VIEW_LABEL: DataView to be used to read the Example,
          optional
        - labels.FORCE_TF_COMPAT_V1_LABEL: Whether to use TF in compat.v1 mode
          irrespective of installed/enabled TF behaviors.
      outputs: A dictionary of labelled output values, including:
        - labels.PER_SET_STATS_OUTPUT_PATHS_LABEL: Paths to statistics output,
          optional.
        - labels.TRANSFORM_METADATA_OUTPUT_PATH_LABEL: A path to
          TFTransformOutput output.
        - labels.TRANSFORM_MATERIALIZE_OUTPUT_PATHS_LABEL: Paths to transform
          materialization.
        - labels.TEMP_OUTPUT_LABEL: A path to temporary directory.
      status_file: Where the status should be written (not yet implemented)
    """
    del status_file  # unused
    absl.logging.debug("Inputs to executor.Transform function: {}".format(inputs))
    absl.logging.debug("Outputs to executor.Transform function: {}".format(outputs))
    compute_statistics = value_utils.GetSoleValue(
        inputs, labels.COMPUTE_STATISTICS_LABEL
    )
    transform_output_path = value_utils.GetSoleValue(
        outputs, labels.TRANSFORM_METADATA_OUTPUT_PATH_LABEL
    )
    raw_examples_data_format = value_utils.GetSoleValue(
        inputs, labels.EXAMPLES_DATA_FORMAT_LABEL
    )
    schema = value_utils.GetSoleValue(inputs, labels.SCHEMA_PATH_LABEL)
    input_dataset_metadata = self._ReadMetadata(raw_examples_data_format, schema)
    materialize_output_paths = value_utils.GetValues(
        outputs, labels.TRANSFORM_MATERIALIZE_OUTPUT_PATHS_LABEL
    )
    preprocessing_fn = self._GetPreprocessingFn(inputs, outputs)
    per_set_stats_output_paths = value_utils.GetValues(
        outputs, labels.PER_SET_STATS_OUTPUT_PATHS_LABEL
    )
    analyze_data_paths = value_utils.GetValues(inputs, labels.ANALYZE_DATA_PATHS_LABEL)
    analyze_paths_file_formats = value_utils.GetValues(
        inputs, labels.ANALYZE_PATHS_FILE_FORMATS_LABEL
    )
    transform_data_paths = value_utils.GetValues(
        inputs, labels.TRANSFORM_DATA_PATHS_LABEL
    )
    transform_paths_file_formats = value_utils.GetValues(
        inputs, labels.TRANSFORM_PATHS_FILE_FORMATS_LABEL
    )
    input_cache_dir = value_utils.GetSoleValue(
        inputs, labels.CACHE_INPUT_PATH_LABEL, strict=False
    )
    output_cache_dir = value_utils.GetSoleValue(
        outputs, labels.CACHE_OUTPUT_PATH_LABEL, strict=False
    )
    # NOTE(review): per_set_stats_output_paths was already fetched above; this
    # second, identical lookup is redundant.
    per_set_stats_output_paths = value_utils.GetValues(
        outputs, labels.PER_SET_STATS_OUTPUT_PATHS_LABEL
    )
    temp_path = value_utils.GetSoleValue(outputs, labels.TEMP_OUTPUT_LABEL)
    data_view_uri = value_utils.GetSoleValue(
        inputs, labels.DATA_VIEW_LABEL, strict=False
    )
    force_tf_compat_v1 = value_utils.GetSoleValue(
        inputs, labels.FORCE_TF_COMPAT_V1_LABEL
    )
    absl.logging.debug("Force tf.compat.v1: %s", force_tf_compat_v1)
    absl.logging.debug("Analyze data patterns: %s", list(enumerate(analyze_data_paths)))
    absl.logging.debug(
        "Transform data patterns: %s", list(enumerate(transform_data_paths))
    )
    absl.logging.debug(
        "Transform materialization output paths: %s",
        list(enumerate(materialize_output_paths)),
    )
    absl.logging.debug("Transform output path: %s", transform_output_path)
    # Each analyze/transform path must have a matching file-format entry.
    if len(analyze_data_paths) != len(analyze_paths_file_formats):
        raise ValueError(
            "size of analyze_data_paths and "
            "analyze_paths_file_formats do not match: {} v.s {}".format(
                len(analyze_data_paths), len(analyze_paths_file_formats)
            )
        )
    if len(transform_data_paths) != len(transform_paths_file_formats):
        raise ValueError(
            "size of transform_data_paths and "
            "transform_paths_file_formats do not match: {} v.s {}".format(
                len(transform_data_paths), len(transform_paths_file_formats)
            )
        )
    # Joint (single flatten) analysis is disabled whenever an output cache dir
    # is configured — presumably because cache output is per dataset; confirm.
    can_process_analysis_jointly = not bool(output_cache_dir)
    analyze_data_list = self._MakeDatasetList(
        analyze_data_paths,
        analyze_paths_file_formats,
        raw_examples_data_format,
        data_view_uri,
        can_process_analysis_jointly,
    )
    if not analyze_data_list:
        raise ValueError("Analyze data list must not be empty.")
    # Per-set stats and per-path materialization need datasets kept separate,
    # so joint transform is only possible when neither is requested.
    can_process_transform_jointly = not bool(
        per_set_stats_output_paths or materialize_output_paths
    )
    transform_data_list = self._MakeDatasetList(
        transform_data_paths,
        transform_paths_file_formats,
        raw_examples_data_format,
        data_view_uri,
        can_process_transform_jointly,
        per_set_stats_output_paths,
        materialize_output_paths,
    )
    all_datasets = analyze_data_list + transform_data_list
    for d in all_datasets:
        d.tfxio = self._CreateTFXIO(d, input_dataset_metadata.schema)
    self._AssertSameTFXIOSchema(all_datasets)
    typespecs = all_datasets[0].tfxio.TensorAdapter().OriginalTypeSpecs()
    # Inspecting the preprocessing_fn even if we know we need a full pass in
    # order to fail faster if it fails.
    analyze_input_columns = tft.get_analyze_input_columns(
        preprocessing_fn, typespecs, force_tf_compat_v1=force_tf_compat_v1
    )
    if not compute_statistics and not materialize_output_paths:
        if analyze_input_columns:
            absl.logging.warning(
                "Not using the in-place Transform because the following features "
                "require analyzing: {}".format(tuple(c for c in analyze_input_columns))
            )
        else:
            absl.logging.warning(
                "Using the in-place Transform since compute_statistics=False, "
                "it does not materialize transformed data, and the configured "
                "preprocessing_fn appears to not require analyzing the data."
            )
            self._RunInPlaceImpl(
                preprocessing_fn,
                force_tf_compat_v1,
                input_dataset_metadata,
                typespecs,
                transform_output_path,
            )
            # TODO(b/122478841): Writes status to status file.
            return
    materialization_format = (
        transform_paths_file_formats[-1] if materialize_output_paths else None
    )
    self._RunBeamImpl(
        analyze_data_list,
        transform_data_list,
        preprocessing_fn,
        force_tf_compat_v1,
        input_dataset_metadata,
        transform_output_path,
        raw_examples_data_format,
        temp_path,
        input_cache_dir,
        output_cache_dir,
        compute_statistics,
        per_set_stats_output_paths,
        materialization_format,
        len(analyze_data_paths),
    )
|
def Transform(
    self, inputs: Mapping[Text, Any], outputs: Mapping[Text, Any], status_file: Text
) -> None:
    """Executes on request.
    This is the implementation part of transform executor. This is intended for
    using or extending the executor without artifact dependency.
    Args:
      inputs: A dictionary of labelled input values, including:
        - labels.COMPUTE_STATISTICS_LABEL: Whether compute statistics.
        - labels.SCHEMA_PATH_LABEL: Path to schema file.
        - labels.EXAMPLES_DATA_FORMAT_LABEL: Example data format, one of the
          enums from example_gen_pb2.PayloadFormat.
        - labels.ANALYZE_DATA_PATHS_LABEL: Paths or path patterns to analyze
          data.
        - labels.ANALYZE_PATHS_FILE_FORMATS_LABEL: File formats of paths to
          analyze data.
        - labels.TRANSFORM_DATA_PATHS_LABEL: Paths or path patterns to transform
          data.
        - labels.TRANSFORM_PATHS_FILE_FORMATS_LABEL: File formats of paths to
          transform data.
        - labels.MODULE_FILE: Path to a Python module that contains the
          preprocessing_fn, optional.
        - labels.PREPROCESSING_FN: Path to a Python function that implements
          preprocessing_fn, optional.
        - labels.CUSTOM_CONFIG: Dictionary of additional parameters for
          preprocessing_fn, optional.
        - labels.DATA_VIEW_LABEL: DataView to be used to read the Example,
          optional
      outputs: A dictionary of labelled output values, including:
        - labels.PER_SET_STATS_OUTPUT_PATHS_LABEL: Paths to statistics output,
          optional.
        - labels.TRANSFORM_METADATA_OUTPUT_PATH_LABEL: A path to
          TFTransformOutput output.
        - labels.TRANSFORM_MATERIALIZE_OUTPUT_PATHS_LABEL: Paths to transform
          materialization.
        - labels.TEMP_OUTPUT_LABEL: A path to temporary directory.
      status_file: Where the status should be written (not yet implemented)
    """
    del status_file  # unused
    absl.logging.debug("Inputs to executor.Transform function: {}".format(inputs))
    absl.logging.debug("Outputs to executor.Transform function: {}".format(outputs))
    compute_statistics = value_utils.GetSoleValue(
        inputs, labels.COMPUTE_STATISTICS_LABEL
    )
    transform_output_path = value_utils.GetSoleValue(
        outputs, labels.TRANSFORM_METADATA_OUTPUT_PATH_LABEL
    )
    raw_examples_data_format = value_utils.GetSoleValue(
        inputs, labels.EXAMPLES_DATA_FORMAT_LABEL
    )
    schema = value_utils.GetSoleValue(inputs, labels.SCHEMA_PATH_LABEL)
    input_dataset_metadata = self._ReadMetadata(raw_examples_data_format, schema)
    materialize_output_paths = value_utils.GetValues(
        outputs, labels.TRANSFORM_MATERIALIZE_OUTPUT_PATHS_LABEL
    )
    preprocessing_fn = self._GetPreprocessingFn(inputs, outputs)
    per_set_stats_output_paths = value_utils.GetValues(
        outputs, labels.PER_SET_STATS_OUTPUT_PATHS_LABEL
    )
    analyze_data_paths = value_utils.GetValues(inputs, labels.ANALYZE_DATA_PATHS_LABEL)
    analyze_paths_file_formats = value_utils.GetValues(
        inputs, labels.ANALYZE_PATHS_FILE_FORMATS_LABEL
    )
    transform_data_paths = value_utils.GetValues(
        inputs, labels.TRANSFORM_DATA_PATHS_LABEL
    )
    transform_paths_file_formats = value_utils.GetValues(
        inputs, labels.TRANSFORM_PATHS_FILE_FORMATS_LABEL
    )
    input_cache_dir = value_utils.GetSoleValue(
        inputs, labels.CACHE_INPUT_PATH_LABEL, strict=False
    )
    output_cache_dir = value_utils.GetSoleValue(
        outputs, labels.CACHE_OUTPUT_PATH_LABEL, strict=False
    )
    # NOTE(review): per_set_stats_output_paths was already fetched above; this
    # second, identical lookup is redundant.
    per_set_stats_output_paths = value_utils.GetValues(
        outputs, labels.PER_SET_STATS_OUTPUT_PATHS_LABEL
    )
    temp_path = value_utils.GetSoleValue(outputs, labels.TEMP_OUTPUT_LABEL)
    data_view_uri = value_utils.GetSoleValue(
        inputs, labels.DATA_VIEW_LABEL, strict=False
    )
    absl.logging.debug("Analyze data patterns: %s", list(enumerate(analyze_data_paths)))
    absl.logging.debug(
        "Transform data patterns: %s", list(enumerate(transform_data_paths))
    )
    absl.logging.debug(
        "Transform materialization output paths: %s",
        list(enumerate(materialize_output_paths)),
    )
    absl.logging.debug("Transform output path: %s", transform_output_path)
    # Each analyze/transform path must have a matching file-format entry.
    if len(analyze_data_paths) != len(analyze_paths_file_formats):
        raise ValueError(
            "size of analyze_data_paths and "
            "analyze_paths_file_formats do not match: {} v.s {}".format(
                len(analyze_data_paths), len(analyze_paths_file_formats)
            )
        )
    if len(transform_data_paths) != len(transform_paths_file_formats):
        raise ValueError(
            "size of transform_data_paths and "
            "transform_paths_file_formats do not match: {} v.s {}".format(
                len(transform_data_paths), len(transform_paths_file_formats)
            )
        )
    # Joint (single flatten) analysis is disabled whenever an output cache dir
    # is configured — presumably because cache output is per dataset; confirm.
    can_process_analysis_jointly = not bool(output_cache_dir)
    analyze_data_list = self._MakeDatasetList(
        analyze_data_paths,
        analyze_paths_file_formats,
        raw_examples_data_format,
        data_view_uri,
        can_process_analysis_jointly,
    )
    if not analyze_data_list:
        raise ValueError("Analyze data list must not be empty.")
    # Per-set stats and per-path materialization need datasets kept separate,
    # so joint transform is only possible when neither is requested.
    can_process_transform_jointly = not bool(
        per_set_stats_output_paths or materialize_output_paths
    )
    transform_data_list = self._MakeDatasetList(
        transform_data_paths,
        transform_paths_file_formats,
        raw_examples_data_format,
        data_view_uri,
        can_process_transform_jointly,
        per_set_stats_output_paths,
        materialize_output_paths,
    )
    all_datasets = analyze_data_list + transform_data_list
    for d in all_datasets:
        d.tfxio = self._CreateTFXIO(d, input_dataset_metadata.schema)
    self._AssertSameTFXIOSchema(all_datasets)
    typespecs = all_datasets[0].tfxio.TensorAdapter().OriginalTypeSpecs()
    # Inspecting the preprocessing_fn even if we know we need a full pass in
    # order to fail faster if it fails.
    analyze_input_columns = tft.get_analyze_input_columns(preprocessing_fn, typespecs)
    if not compute_statistics and not materialize_output_paths:
        if analyze_input_columns:
            absl.logging.warning(
                "Not using the in-place Transform because the following features "
                "require analyzing: {}".format(tuple(c for c in analyze_input_columns))
            )
        else:
            absl.logging.warning(
                "Using the in-place Transform since compute_statistics=False, "
                "it does not materialize transformed data, and the configured "
                "preprocessing_fn appears to not require analyzing the data."
            )
            self._RunInPlaceImpl(
                preprocessing_fn,
                input_dataset_metadata,
                typespecs,
                transform_output_path,
            )
            # TODO(b/122478841): Writes status to status file.
            return
    materialization_format = (
        transform_paths_file_formats[-1] if materialize_output_paths else None
    )
    self._RunBeamImpl(
        analyze_data_list,
        transform_data_list,
        preprocessing_fn,
        input_dataset_metadata,
        transform_output_path,
        raw_examples_data_format,
        temp_path,
        input_cache_dir,
        output_cache_dir,
        compute_statistics,
        per_set_stats_output_paths,
        materialization_format,
        len(analyze_data_paths),
    )
|
https://github.com/tensorflow/tfx/issues/1665
|
RuntimeError: Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/apache_beam/runners/worker/sdk_worker.py", line 312, in get
processor = self.cached_bundle_processors[bundle_descriptor_id].pop()
IndexError: pop from empty list
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/apache_beam/internal/pickler.py", line 250, in dumps
s = dill.dumps(o)
...
File "/usr/local/lib/python3.6/dist-packages/apache_beam/internal/pickler.py", line 77, in _is_nested_class
and cls.__name__ not in sys.modules[cls.__module__].__dict__)
KeyError: 'user_module'
During handling of the above exception, another exception occurred:
|
IndexError
|
def _RunBeamImpl(
    self,
    analyze_data_list: List[_Dataset],
    transform_data_list: List[_Dataset],
    preprocessing_fn: Any,
    force_tf_compat_v1: bool,
    input_dataset_metadata: dataset_metadata.DatasetMetadata,
    transform_output_path: Text,
    raw_examples_data_format: int,
    temp_path: Text,
    input_cache_dir: Optional[Text],
    output_cache_dir: Optional[Text],
    compute_statistics: bool,
    per_set_stats_output_paths: Sequence[Text],
    materialization_format: Optional[Text],
    analyze_paths_count: int,
) -> _Status:
    """Perform data preprocessing with TFT.
    Args:
      analyze_data_list: List of datasets for analysis.
      transform_data_list: List of datasets for transform.
      preprocessing_fn: The tf.Transform preprocessing_fn.
      force_tf_compat_v1: If True, call Transform's API to use Tensorflow in
        tf.compat.v1 mode.
      input_dataset_metadata: A DatasetMetadata object for the input data.
      transform_output_path: An absolute path to write the output to.
      raw_examples_data_format: The data format of the raw examples. One of the
        enums from example_gen_pb2.PayloadFormat.
      temp_path: A path to a temporary dir.
      input_cache_dir: A dir containing the input analysis cache. May be None.
      output_cache_dir: A dir to write the analysis cache to. May be None.
      compute_statistics: A bool indicating whether or not compute statistics.
      per_set_stats_output_paths: Paths to per-set statistics output. If empty,
        per-set statistics is not produced.
      materialization_format: A string describing the format of the materialized
        data or None if materialization is not enabled.
      analyze_paths_count: An integer, the number of paths that should be used
        for analysis.
    Returns:
      Status of the execution.
    """
    self._AssertSameTFXIOSchema(analyze_data_list)
    unprojected_typespecs = (
        analyze_data_list[0].tfxio.TensorAdapter().OriginalTypeSpecs()
    )
    analyze_input_columns = tft.get_analyze_input_columns(
        preprocessing_fn, unprojected_typespecs, force_tf_compat_v1=force_tf_compat_v1
    )
    transform_input_columns = tft.get_transform_input_columns(
        preprocessing_fn, unprojected_typespecs, force_tf_compat_v1=force_tf_compat_v1
    )
    # Use the same dataset (same columns) for AnalyzeDataset and computing
    # pre-transform stats so that the data will only be read once for these
    # two operations.
    if compute_statistics:
        analyze_input_columns = list(
            set(list(analyze_input_columns) + list(transform_input_columns))
        )
    # Project each dataset down to only the columns its phase needs.
    for d in analyze_data_list:
        d.tfxio = d.tfxio.Project(analyze_input_columns)
    self._AssertSameTFXIOSchema(analyze_data_list)
    analyze_data_tensor_adapter_config = analyze_data_list[
        0
    ].tfxio.TensorAdapterConfig()
    for d in transform_data_list:
        d.tfxio = d.tfxio.Project(transform_input_columns)
    desired_batch_size = self._GetDesiredBatchSize(raw_examples_data_format)
    with self._CreatePipeline(transform_output_path) as pipeline:
        with tft_beam.Context(
            temp_dir=temp_path,
            desired_batch_size=desired_batch_size,
            passthrough_keys=self._GetTFXIOPassthroughKeys(),
            use_deep_copy_optimization=True,
            use_tfxio=True,
            force_tf_compat_v1=force_tf_compat_v1,
        ):
            # pylint: disable=expression-not-assigned
            # pylint: disable=no-value-for-parameter
            _ = pipeline | "IncrementPipelineMetrics" >> self._IncrementPipelineMetrics(
                len(unprojected_typespecs),
                len(analyze_input_columns),
                len(transform_input_columns),
                analyze_paths_count,
            )
            # Decide which datasets truly need analysis and read the cache.
            (new_analyze_data_dict, input_cache) = (
                pipeline
                | "OptimizeRun"
                >> self._OptimizeRun(
                    input_cache_dir,
                    output_cache_dir,
                    analyze_data_list,
                    unprojected_typespecs,
                    preprocessing_fn,
                    self._GetCacheSource(),
                    force_tf_compat_v1,
                )
            )
            if input_cache:
                absl.logging.debug("Analyzing data with cache.")
            full_analyze_dataset_keys_list = [
                dataset.dataset_key for dataset in analyze_data_list
            ]
            # Removing unneeded datasets if they won't be needed for statistics or
            # materialization.
            if materialization_format is None and not compute_statistics:
                if None in new_analyze_data_dict.values():
                    absl.logging.debug(
                        "Not reading the following datasets due to cache: %s",
                        [
                            dataset.file_pattern
                            for dataset in analyze_data_list
                            if new_analyze_data_dict[dataset.dataset_key] is None
                        ],
                    )
                analyze_data_list = [
                    d for d in new_analyze_data_dict.values() if d is not None
                ]
            for dataset in analyze_data_list:
                infix = "AnalysisIndex{}".format(dataset.index)
                dataset.standardized = pipeline | "TFXIOReadAndDecode[{}]".format(
                    infix
                ) >> dataset.tfxio.BeamSource(desired_batch_size)
            # Cache-covered datasets map to None (no PCollection to analyze).
            input_analysis_data = {}
            for key, dataset in new_analyze_data_dict.items():
                input_analysis_data[key] = (
                    None if dataset is None else dataset.standardized
                )
            transform_fn, cache_output = (
                input_analysis_data,
                input_cache,
                analyze_data_tensor_adapter_config,
            ) | "Analyze" >> tft_beam.AnalyzeDatasetWithCache(
                preprocessing_fn, pipeline=pipeline
            )
            # Write the raw/input metadata.
            (
                input_dataset_metadata
                | "WriteMetadata"
                >> tft_beam.WriteMetadata(
                    os.path.join(
                        transform_output_path, tft.TFTransformOutput.RAW_METADATA_DIR
                    ),
                    pipeline,
                )
            )
            # WriteTransformFn writes transform_fn and metadata to subdirectories
            # tensorflow_transform.SAVED_MODEL_DIR and
            # tensorflow_transform.TRANSFORMED_METADATA_DIR respectively.
            (
                transform_fn
                | "WriteTransformFn" >> tft_beam.WriteTransformFn(transform_output_path)
            )
            if output_cache_dir is not None and cache_output is not None:
                tf.io.gfile.makedirs(output_cache_dir)
                absl.logging.debug("Using existing cache in: %s", input_cache_dir)
                if input_cache_dir is not None:
                    # Only copy cache that is relevant to this iteration. This is
                    # assuming that this pipeline operates on rolling ranges, so those
                    # cache entries may also be relevant for future iterations.
                    for span_cache_dir in input_analysis_data:
                        full_span_cache_dir = os.path.join(
                            input_cache_dir, span_cache_dir.key
                        )
                        if tf.io.gfile.isdir(full_span_cache_dir):
                            self._CopyCache(
                                full_span_cache_dir,
                                os.path.join(output_cache_dir, span_cache_dir.key),
                            )
                (
                    cache_output
                    | "WriteCache"
                    >> analyzer_cache.WriteAnalysisCacheToFS(
                        pipeline=pipeline,
                        cache_base_dir=output_cache_dir,
                        sink=self._GetCacheSink(),
                        dataset_keys=full_analyze_dataset_keys_list,
                    )
                )
            if compute_statistics or materialization_format is not None:
                # Do not compute pre-transform stats if the input format is raw proto,
                # as StatsGen would treat any input as tf.Example. Note that
                # tf.SequenceExamples are wire-format compatible with tf.Examples.
                if compute_statistics and not self._IsDataFormatProto(
                    raw_examples_data_format
                ):
                    # Aggregated feature stats before transformation.
                    pre_transform_feature_stats_path = os.path.join(
                        transform_output_path,
                        tft.TFTransformOutput.PRE_TRANSFORM_FEATURE_STATS_PATH,
                    )
                    if self._IsDataFormatSequenceExample(raw_examples_data_format):
                        schema_proto = None
                    else:
                        schema_proto = _GetSchemaProto(input_dataset_metadata)
                    if self._IsDataFormatSequenceExample(raw_examples_data_format):
                        def _ExtractRawExampleBatches(record_batch):
                            # Pulls the serialized raw-example column out of each
                            # Arrow RecordBatch as a plain Python list.
                            return (
                                record_batch.column(
                                    record_batch.schema.get_field_index(RAW_EXAMPLE_KEY)
                                )
                                .flatten()
                                .to_pylist()
                            )
                        # Make use of the fact that tf.SequenceExample is wire-format
                        # compatible with tf.Example
                        stats_input = []
                        for dataset in analyze_data_list:
                            infix = "AnalysisIndex{}".format(dataset.index)
                            stats_input.append(
                                dataset.standardized
                                | "ExtractRawExampleBatches[{}]".format(infix)
                                >> beam.Map(_ExtractRawExampleBatches)
                                | "DecodeSequenceExamplesAsExamplesIntoRecordBatches[{}]".format(
                                    infix
                                )
                                >> beam.ParDo(
                                    self._ToArrowRecordBatchesFn(schema_proto)
                                )
                            )
                    else:
                        stats_input = [
                            dataset.standardized for dataset in analyze_data_list
                        ]
                    pre_transform_stats_options = (
                        transform_stats_options.get_pre_transform_stats_options()
                    )
                    (
                        stats_input
                        | "FlattenAnalysisDatasets" >> beam.Flatten(pipeline=pipeline)
                        | "GenerateStats[FlattenedAnalysisDataset]"
                        >> self._GenerateStats(
                            pre_transform_feature_stats_path,
                            schema_proto,
                            stats_options=pre_transform_stats_options,
                        )
                    )
                # transform_data_list is a superset of analyze_data_list, we pay the
                # cost to read the same dataset (analyze_data_list) again here to
                # prevent certain beam runner from doing large temp materialization.
                for dataset in transform_data_list:
                    infix = "TransformIndex{}".format(dataset.index)
                    dataset.standardized = pipeline | "TFXIOReadAndDecode[{}]".format(
                        infix
                    ) >> dataset.tfxio.BeamSource(desired_batch_size)
                    (dataset.transformed, metadata) = (
                        (dataset.standardized, dataset.tfxio.TensorAdapterConfig()),
                        transform_fn,
                    ) | "Transform[{}]".format(infix) >> tft_beam.TransformDataset()
                    dataset.transformed_and_serialized = (
                        dataset.transformed
                        | "EncodeAndSerialize[{}]".format(infix)
                        >> beam.ParDo(
                            self._EncodeAsSerializedExamples(),
                            _GetSchemaProto(metadata),
                        )
                    )
                if compute_statistics:
                    # Aggregated feature stats after transformation.
                    _, metadata = transform_fn
                    # TODO(b/70392441): Retain tf.Metadata (e.g., IntDomain) in
                    # schema. Currently input dataset schema only contains dtypes,
                    # and other metadata is dropped due to roundtrip to tensors.
                    transformed_schema_proto = _GetSchemaProto(metadata)
                    for dataset in transform_data_list:
                        infix = "TransformIndex{}".format(dataset.index)
                        dataset.transformed_and_standardized = (
                            dataset.transformed_and_serialized
                            | "FromTransformedToArrowRecordBatches[{}]".format(infix)
                            >> self._ToArrowRecordBatches(
                                schema=transformed_schema_proto
                            )
                        )
                    post_transform_feature_stats_path = os.path.join(
                        transform_output_path,
                        tft.TFTransformOutput.POST_TRANSFORM_FEATURE_STATS_PATH,
                    )
                    post_transform_stats_options = (
                        transform_stats_options.get_post_transform_stats_options()
                    )
                    (
                        [
                            dataset.transformed_and_standardized
                            for dataset in transform_data_list
                        ]
                        | "FlattenTransformedDatasets" >> beam.Flatten()
                        | "GenerateStats[FlattenedTransformedDatasets]"
                        >> self._GenerateStats(
                            post_transform_feature_stats_path,
                            transformed_schema_proto,
                            stats_options=post_transform_stats_options,
                        )
                    )
                    if per_set_stats_output_paths:
                        # TODO(b/130885503): Remove duplicate stats gen compute that is
                        # done both on a flattened view of the data, and on each span
                        # below.
                        for dataset in transform_data_list:
                            infix = "TransformIndex{}".format(dataset.index)
                            (
                                dataset.transformed_and_standardized
                                | "GenerateStats[{}]".format(infix)
                                >> self._GenerateStats(
                                    dataset.stats_output_path,
                                    transformed_schema_proto,
                                    stats_options=post_transform_stats_options,
                                )
                            )
                if materialization_format is not None:
                    for dataset in transform_data_list:
                        infix = "TransformIndex{}".format(dataset.index)
                        (
                            dataset.transformed_and_serialized
                            | "Materialize[{}]".format(infix)
                            >> self._WriteExamples(
                                materialization_format, dataset.materialize_output_path
                            )
                        )
    return _Status.OK()
|
def _RunBeamImpl(
    self,
    analyze_data_list: List[_Dataset],
    transform_data_list: List[_Dataset],
    preprocessing_fn: Any,
    input_dataset_metadata: dataset_metadata.DatasetMetadata,
    transform_output_path: Text,
    raw_examples_data_format: int,
    temp_path: Text,
    input_cache_dir: Optional[Text],
    output_cache_dir: Optional[Text],
    compute_statistics: bool,
    per_set_stats_output_paths: Sequence[Text],
    materialization_format: Optional[Text],
    analyze_paths_count: int,
) -> _Status:
    """Perform data preprocessing with TFT.
    Args:
      analyze_data_list: List of datasets for analysis.
      transform_data_list: List of datasets for transform.
      preprocessing_fn: The tf.Transform preprocessing_fn.
      input_dataset_metadata: A DatasetMetadata object for the input data.
      transform_output_path: An absolute path to write the output to.
      raw_examples_data_format: The data format of the raw examples. One of the
        enums from example_gen_pb2.PayloadFormat.
      temp_path: A path to a temporary dir.
      input_cache_dir: A dir containing the input analysis cache. May be None.
      output_cache_dir: A dir to write the analysis cache to. May be None.
      compute_statistics: A bool indicating whether or not compute statistics.
      per_set_stats_output_paths: Paths to per-set statistics output. If empty,
        per-set statistics is not produced.
      materialization_format: A string describing the format of the materialized
        data or None if materialization is not enabled.
      analyze_paths_count: An integer, the number of paths that should be used
        for analysis.
    Returns:
      Status of the execution.
    """
    # All analysis datasets must agree on schema before a shared projection is applied.
    self._AssertSameTFXIOSchema(analyze_data_list)
    unprojected_typespecs = (
        analyze_data_list[0].tfxio.TensorAdapter().OriginalTypeSpecs()
    )
    analyze_input_columns = tft.get_analyze_input_columns(
        preprocessing_fn, unprojected_typespecs
    )
    transform_input_columns = tft.get_transform_input_columns(
        preprocessing_fn, unprojected_typespecs
    )
    # Use the same dataset (same columns) for AnalyzeDataset and computing
    # pre-transform stats so that the data will only be read once for these
    # two operations.
    if compute_statistics:
        analyze_input_columns = list(
            set(list(analyze_input_columns) + list(transform_input_columns))
        )
    for d in analyze_data_list:
        d.tfxio = d.tfxio.Project(analyze_input_columns)
    self._AssertSameTFXIOSchema(analyze_data_list)
    analyze_data_tensor_adapter_config = analyze_data_list[
        0
    ].tfxio.TensorAdapterConfig()
    for d in transform_data_list:
        d.tfxio = d.tfxio.Project(transform_input_columns)
    desired_batch_size = self._GetDesiredBatchSize(raw_examples_data_format)
    with self._CreatePipeline(transform_output_path) as pipeline:
        with tft_beam.Context(
            temp_dir=temp_path,
            desired_batch_size=desired_batch_size,
            passthrough_keys=self._GetTFXIOPassthroughKeys(),
            use_deep_copy_optimization=True,
            use_tfxio=True,
        ):
            # pylint: disable=expression-not-assigned
            # pylint: disable=no-value-for-parameter
            _ = pipeline | "IncrementPipelineMetrics" >> self._IncrementPipelineMetrics(
                len(unprojected_typespecs),
                len(analyze_input_columns),
                len(transform_input_columns),
                analyze_paths_count,
            )
            (new_analyze_data_dict, input_cache) = (
                pipeline
                | "OptimizeRun"
                >> self._OptimizeRun(
                    input_cache_dir,
                    output_cache_dir,
                    analyze_data_list,
                    unprojected_typespecs,
                    preprocessing_fn,
                    self._GetCacheSource(),
                )
            )
            if input_cache:
                absl.logging.debug("Analyzing data with cache.")
            # Captured before filtering so cache writing below sees every dataset key.
            full_analyze_dataset_keys_list = [
                dataset.dataset_key for dataset in analyze_data_list
            ]
            # Removing unneeded datasets if they won't be needed for statistics or
            # materialization.
            if materialization_format is None and not compute_statistics:
                if None in new_analyze_data_dict.values():
                    absl.logging.debug(
                        "Not reading the following datasets due to cache: %s",
                        [
                            dataset.file_pattern
                            for dataset in analyze_data_list
                            if new_analyze_data_dict[dataset.dataset_key] is None
                        ],
                    )
                analyze_data_list = [
                    d for d in new_analyze_data_dict.values() if d is not None
                ]
            # Standardize each remaining analysis dataset into Arrow RecordBatches.
            for dataset in analyze_data_list:
                infix = "AnalysisIndex{}".format(dataset.index)
                dataset.standardized = pipeline | "TFXIOReadAndDecode[{}]".format(
                    infix
                ) >> dataset.tfxio.BeamSource(desired_batch_size)
            input_analysis_data = {}
            for key, dataset in new_analyze_data_dict.items():
                input_analysis_data[key] = (
                    None if dataset is None else dataset.standardized
                )
            # Run the TFT analyzers (incrementally, via cache) to build the transform_fn.
            transform_fn, cache_output = (
                input_analysis_data,
                input_cache,
                analyze_data_tensor_adapter_config,
            ) | "Analyze" >> tft_beam.AnalyzeDatasetWithCache(
                preprocessing_fn, pipeline=pipeline
            )
            # Write the raw/input metadata.
            (
                input_dataset_metadata
                | "WriteMetadata"
                >> tft_beam.WriteMetadata(
                    os.path.join(
                        transform_output_path, tft.TFTransformOutput.RAW_METADATA_DIR
                    ),
                    pipeline,
                )
            )
            # WriteTransformFn writes transform_fn and metadata to subdirectories
            # tensorflow_transform.SAVED_MODEL_DIR and
            # tensorflow_transform.TRANSFORMED_METADATA_DIR respectively.
            (
                transform_fn
                | "WriteTransformFn" >> tft_beam.WriteTransformFn(transform_output_path)
            )
            # Persist the analysis cache for reuse by future runs.
            if output_cache_dir is not None and cache_output is not None:
                tf.io.gfile.makedirs(output_cache_dir)
                absl.logging.debug("Using existing cache in: %s", input_cache_dir)
                if input_cache_dir is not None:
                    # Only copy cache that is relevant to this iteration. This is
                    # assuming that this pipeline operates on rolling ranges, so those
                    # cache entries may also be relevant for future iterations.
                    for span_cache_dir in input_analysis_data:
                        full_span_cache_dir = os.path.join(
                            input_cache_dir, span_cache_dir.key
                        )
                        if tf.io.gfile.isdir(full_span_cache_dir):
                            self._CopyCache(
                                full_span_cache_dir,
                                os.path.join(output_cache_dir, span_cache_dir.key),
                            )
                (
                    cache_output
                    | "WriteCache"
                    >> analyzer_cache.WriteAnalysisCacheToFS(
                        pipeline=pipeline,
                        cache_base_dir=output_cache_dir,
                        sink=self._GetCacheSink(),
                        dataset_keys=full_analyze_dataset_keys_list,
                    )
                )
            if compute_statistics or materialization_format is not None:
                # Do not compute pre-transform stats if the input format is raw proto,
                # as StatsGen would treat any input as tf.Example. Note that
                # tf.SequenceExamples are wire-format compatible with tf.Examples.
                if compute_statistics and not self._IsDataFormatProto(
                    raw_examples_data_format
                ):
                    # Aggregated feature stats before transformation.
                    pre_transform_feature_stats_path = os.path.join(
                        transform_output_path,
                        tft.TFTransformOutput.PRE_TRANSFORM_FEATURE_STATS_PATH,
                    )
                    if self._IsDataFormatSequenceExample(raw_examples_data_format):
                        schema_proto = None
                    else:
                        schema_proto = _GetSchemaProto(input_dataset_metadata)
                    if self._IsDataFormatSequenceExample(raw_examples_data_format):
                        def _ExtractRawExampleBatches(record_batch):
                            return (
                                record_batch.column(
                                    record_batch.schema.get_field_index(RAW_EXAMPLE_KEY)
                                )
                                .flatten()
                                .to_pylist()
                            )
                        # Make use of the fact that tf.SequenceExample is wire-format
                        # compatible with tf.Example
                        stats_input = []
                        for dataset in analyze_data_list:
                            infix = "AnalysisIndex{}".format(dataset.index)
                            stats_input.append(
                                dataset.standardized
                                | "ExtractRawExampleBatches[{}]".format(infix)
                                >> beam.Map(_ExtractRawExampleBatches)
                                | "DecodeSequenceExamplesAsExamplesIntoRecordBatches[{}]".format(
                                    infix
                                )
                                >> beam.ParDo(
                                    self._ToArrowRecordBatchesFn(schema_proto)
                                )
                            )
                    else:
                        stats_input = [
                            dataset.standardized for dataset in analyze_data_list
                        ]
                    pre_transform_stats_options = (
                        transform_stats_options.get_pre_transform_stats_options()
                    )
                    (
                        stats_input
                        | "FlattenAnalysisDatasets" >> beam.Flatten(pipeline=pipeline)
                        | "GenerateStats[FlattenedAnalysisDataset]"
                        >> self._GenerateStats(
                            pre_transform_feature_stats_path,
                            schema_proto,
                            stats_options=pre_transform_stats_options,
                        )
                    )
                # transform_data_list is a superset of analyze_data_list, we pay the
                # cost to read the same dataset (analyze_data_list) again here to
                # prevent certain beam runner from doing large temp materialization.
                for dataset in transform_data_list:
                    infix = "TransformIndex{}".format(dataset.index)
                    dataset.standardized = pipeline | "TFXIOReadAndDecode[{}]".format(
                        infix
                    ) >> dataset.tfxio.BeamSource(desired_batch_size)
                    (dataset.transformed, metadata) = (
                        (dataset.standardized, dataset.tfxio.TensorAdapterConfig()),
                        transform_fn,
                    ) | "Transform[{}]".format(infix) >> tft_beam.TransformDataset()
                    dataset.transformed_and_serialized = (
                        dataset.transformed
                        | "EncodeAndSerialize[{}]".format(infix)
                        >> beam.ParDo(
                            self._EncodeAsSerializedExamples(),
                            _GetSchemaProto(metadata),
                        )
                    )
                if compute_statistics:
                    # Aggregated feature stats after transformation.
                    _, metadata = transform_fn
                    # TODO(b/70392441): Retain tf.Metadata (e.g., IntDomain) in
                    # schema. Currently input dataset schema only contains dtypes,
                    # and other metadata is dropped due to roundtrip to tensors.
                    transformed_schema_proto = _GetSchemaProto(metadata)
                    for dataset in transform_data_list:
                        infix = "TransformIndex{}".format(dataset.index)
                        dataset.transformed_and_standardized = (
                            dataset.transformed_and_serialized
                            | "FromTransformedToArrowRecordBatches[{}]".format(infix)
                            >> self._ToArrowRecordBatches(
                                schema=transformed_schema_proto
                            )
                        )
                    post_transform_feature_stats_path = os.path.join(
                        transform_output_path,
                        tft.TFTransformOutput.POST_TRANSFORM_FEATURE_STATS_PATH,
                    )
                    post_transform_stats_options = (
                        transform_stats_options.get_post_transform_stats_options()
                    )
                    (
                        [
                            dataset.transformed_and_standardized
                            for dataset in transform_data_list
                        ]
                        | "FlattenTransformedDatasets" >> beam.Flatten()
                        | "GenerateStats[FlattenedTransformedDatasets]"
                        >> self._GenerateStats(
                            post_transform_feature_stats_path,
                            transformed_schema_proto,
                            stats_options=post_transform_stats_options,
                        )
                    )
                    if per_set_stats_output_paths:
                        # TODO(b/130885503): Remove duplicate stats gen compute that is
                        # done both on a flattened view of the data, and on each span
                        # below.
                        for dataset in transform_data_list:
                            infix = "TransformIndex{}".format(dataset.index)
                            (
                                dataset.transformed_and_standardized
                                | "GenerateStats[{}]".format(infix)
                                >> self._GenerateStats(
                                    dataset.stats_output_path,
                                    transformed_schema_proto,
                                    stats_options=post_transform_stats_options,
                                )
                            )
                if materialization_format is not None:
                    for dataset in transform_data_list:
                        infix = "TransformIndex{}".format(dataset.index)
                        (
                            dataset.transformed_and_serialized
                            | "Materialize[{}]".format(infix)
                            >> self._WriteExamples(
                                materialization_format, dataset.materialize_output_path
                            )
                        )
    return _Status.OK()
|
https://github.com/tensorflow/tfx/issues/1665
|
RuntimeError: Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/apache_beam/runners/worker/sdk_worker.py", line 312, in get
processor = self.cached_bundle_processors[bundle_descriptor_id].pop()
IndexError: pop from empty list
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/apache_beam/internal/pickler.py", line 250, in dumps
s = dill.dumps(o)
...
File "/usr/local/lib/python3.6/dist-packages/apache_beam/internal/pickler.py", line 77, in _is_nested_class
and cls.__name__ not in sys.modules[cls.__module__].__dict__)
KeyError: 'user_module'
During handling of the above exception, another exception occurred:
|
IndexError
|
def _RunInPlaceImpl(
    self,
    preprocessing_fn: Any,
    force_tf_compat_v1: bool,
    metadata: dataset_metadata.DatasetMetadata,
    typespecs: Dict[Text, tf.TypeSpec],
    transform_output_path: Text,
) -> _Status:
    """Runs one transform iteration in-place, i.e. without reading any data.
    Args:
      preprocessing_fn: The tf.Transform preprocessing_fn.
      force_tf_compat_v1: If True, call Transform's API to use Tensorflow in
        tf.compat.v1 mode.
      metadata: A DatasetMetadata object for the input data.
      typespecs: a Dict[Text, tf.TypeSpec]
      transform_output_path: An absolute path to write the output to.
    Returns:
      Status of the execution.
    """
    absl.logging.debug("Processing an in-place transform")
    # Persist the raw (pre-transform) metadata alongside the transform output.
    input_metadata_dir = os.path.join(
        transform_output_path, tft.TFTransformOutput.RAW_METADATA_DIR
    )
    metadata_io.write_metadata(metadata, input_metadata_dir)
    # TODO(b/149997088): Use typespecs for the tf.compat.v1 path as well.
    schema_proto = _GetSchemaProto(metadata)
    legacy_feature_specs = schema_utils.schema_as_feature_spec(
        schema_proto
    ).feature_spec
    impl_helper.analyze_in_place(
        preprocessing_fn,
        force_tf_compat_v1,
        legacy_feature_specs,
        typespecs,
        transform_output_path,
    )
    return _Status.OK()
|
def _RunInPlaceImpl(
    self,
    preprocessing_fn: Any,
    metadata: dataset_metadata.DatasetMetadata,
    typespecs: Dict[Text, tf.TypeSpec],
    transform_output_path: Text,
) -> _Status:
    """Runs a transformation iteration in-place without looking at the data.
    Args:
      preprocessing_fn: The tf.Transform preprocessing_fn.
      metadata: A DatasetMetadata object for the input data.
      typespecs: a Dict[Text, tf.TypeSpec]
      transform_output_path: An absolute path to write the output to.
    Returns:
      Status of the execution.
    """
    absl.logging.debug("Processing an in-place transform")
    # Persist the raw (pre-transform) metadata next to the transform output.
    raw_metadata_dir = os.path.join(
        transform_output_path, tft.TFTransformOutput.RAW_METADATA_DIR
    )
    metadata_io.write_metadata(metadata, raw_metadata_dir)
    # Trace the preprocessing_fn in a fresh TF1 graph/session; no input data
    # is read — placeholders stand in for the batched feature tensors.
    with tf.compat.v1.Graph().as_default() as graph:
        with tf.compat.v1.Session(graph=graph) as sess:
            input_signature = impl_helper.batched_placeholders_from_specs(
                schema_utils.schema_as_feature_spec(
                    _GetSchemaProto(metadata)
                ).feature_spec
            )
            # In order to avoid a bug where import_graph_def fails when the
            # input_map and return_elements of an imported graph are the same
            # (b/34288791), we avoid using the placeholder of an input column as an
            # output of a graph. We do this by applying tf.identity to all inputs of
            # the preprocessing_fn. Note this applies at the level of raw tensors.
            # TODO(b/34288791): Remove this workaround and use a shallow copy of
            # inputs instead. A shallow copy is needed in case
            # self._preprocessing_fn mutates its input.
            copied_inputs = impl_helper.copy_tensors(input_signature)
            output_signature = preprocessing_fn(copied_inputs)
            # Initialize variables and tables so the session state can be
            # saved into the transform SavedModel below.
            sess.run(tf.compat.v1.global_variables_initializer())
            sess.run(tf.compat.v1.tables_initializer())
            transform_fn_path = os.path.join(
                transform_output_path, tft.TFTransformOutput.TRANSFORM_FN_DIR
            )
            saved_transform_io.write_saved_transform_from_session(
                sess, input_signature, output_signature, transform_fn_path
            )
            # Infer the post-transform schema from the traced output tensors.
            transformed_metadata = dataset_metadata.DatasetMetadata(
                schema=tft.schema_inference.infer_feature_schema(
                    output_signature, graph, sess
                )
            )
    transformed_metadata_dir = os.path.join(
        transform_output_path, tft.TFTransformOutput.TRANSFORMED_METADATA_DIR
    )
    metadata_io.write_metadata(transformed_metadata, transformed_metadata_dir)
    return _Status.OK()
|
https://github.com/tensorflow/tfx/issues/1665
|
RuntimeError: Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/apache_beam/runners/worker/sdk_worker.py", line 312, in get
processor = self.cached_bundle_processors[bundle_descriptor_id].pop()
IndexError: pop from empty list
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/apache_beam/internal/pickler.py", line 250, in dumps
s = dill.dumps(o)
...
File "/usr/local/lib/python3.6/dist-packages/apache_beam/internal/pickler.py", line 77, in _is_nested_class
and cls.__name__ not in sys.modules[cls.__module__].__dict__)
KeyError: 'user_module'
During handling of the above exception, another exception occurred:
|
IndexError
|
def import_func_from_source(source_path: Text, fn_name: Text) -> Callable:  # pylint: disable=g-bare-generic
    """Imports a function from a module provided as source file.

    Args:
      source_path: Path to the module's source file; fetched to the local
        file-system first if it is remote.
      fn_name: Name of the function to look up in the loaded module.

    Returns:
      The function object named `fn_name` from the loaded module.

    Raises:
      ImportError: If the module cannot be loaded from `source_path`.
      AttributeError: If the loaded module has no attribute `fn_name`.
    """
    # If module path is not local, download to local file-system first,
    # because importlib can't import from GCS
    source_path = io_utils.ensure_local(source_path)
    try:
        loader = importlib.machinery.SourceFileLoader(
            fullname="user_module",
            path=source_path,
        )
        spec = importlib.util.spec_from_loader(loader.name, loader, origin=source_path)
        module = importlib.util.module_from_spec(spec)
        # Registering in sys.modules keeps the module reachable by name, which
        # name-based picklers (e.g. Beam's dill pickler) require.
        sys.modules[loader.name] = module
        loader.exec_module(module)
        return getattr(module, fn_name)
    except IOError as e:
        # Chain the original error so the underlying cause is not lost.
        raise ImportError(
            "{} in {} not found in import_func_from_source()".format(
                fn_name, source_path
            )
        ) from e
|
def import_func_from_source(source_path: Text, fn_name: Text) -> Callable:  # pylint: disable=g-bare-generic
    """Imports a function from a module provided as source file.

    Args:
      source_path: Path to the module's source file; fetched to the local
        file-system first if it is remote.
      fn_name: Name of the function to look up in the loaded module.

    Returns:
      The function object named `fn_name` from the loaded module.

    Raises:
      ImportError: If the module cannot be loaded from `source_path`.
    """
    # If module path is not local, download to local file-system first,
    # because importlib can't import from GCS
    source_path = io_utils.ensure_local(source_path)
    try:
        if six.PY2:
            import imp  # pylint: disable=g-import-not-at-top
            try:
                user_module = imp.load_source("user_module", source_path)
                return getattr(user_module, fn_name)
            except IOError:
                raise
        else:
            import sys  # pylint: disable=g-import-not-at-top
            loader = importlib.machinery.SourceFileLoader(
                fullname="user_module",
                path=source_path,
            )
            user_module = types.ModuleType(loader.name)
            # Fix: register the module in sys.modules before executing it.
            # Without this, objects defined in the module cannot be pickled by
            # name-based picklers — e.g. Beam's dill pickler fails with
            # KeyError: 'user_module' when serializing the preprocessing_fn.
            sys.modules[loader.name] = user_module
            loader.exec_module(user_module)
            return getattr(user_module, fn_name)
    except IOError:
        raise ImportError(
            "{} in {} not found in import_func_from_source()".format(
                fn_name, source_path
            )
        )
|
https://github.com/tensorflow/tfx/issues/1665
|
RuntimeError: Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/apache_beam/runners/worker/sdk_worker.py", line 312, in get
processor = self.cached_bundle_processors[bundle_descriptor_id].pop()
IndexError: pop from empty list
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/apache_beam/internal/pickler.py", line 250, in dumps
s = dill.dumps(o)
...
File "/usr/local/lib/python3.6/dist-packages/apache_beam/internal/pickler.py", line 77, in _is_nested_class
and cls.__name__ not in sys.modules[cls.__module__].__dict__)
KeyError: 'user_module'
During handling of the above exception, another exception occurred:
|
IndexError
|
def Do(
    self,
    input_dict: Dict[Text, List[types.Artifact]],
    output_dict: Dict[Text, List[types.Artifact]],
    exec_properties: Dict[Text, Any],
) -> None:
    """Pushes the trained model to its destination when it has been blessed.
    Args:
      input_dict: Input dict from input key to a list of artifacts, including:
        - model_export: exported model from trainer.
        - model_blessing: model blessing path from model_validator. A push
          action delivers the model exports produced by Trainer to the
          destination defined in component config.
      output_dict: Output dict from key to a list of artifacts, including:
        - model_push: A list of 'ModelPushPath' artifact of size one. It will
          include the model in this push execution if the model was pushed.
      exec_properties: A dict of execution properties, including:
        - push_destination: JSON string of pusher_pb2.PushDestination instance,
          providing instruction of destination to push model.
    Returns:
      None
    """
    self._log_startup(input_dict, output_dict, exec_properties)
    pushed_model = artifact_utils.get_single_instance(output_dict[PUSHED_MODEL_KEY])
    # Without a blessing there is nothing to push; record that and stop.
    if not self.CheckBlessing(input_dict):
        self._MarkNotPushed(pushed_model)
        return
    trained_model = artifact_utils.get_single_instance(input_dict[MODEL_KEY])
    serving_model_dir = path_utils.serving_model_path(trained_model.uri)
    # Resolve the push destination from exec_properties. Only filesystem
    # destinations are supported; anything else is rejected up front.
    # TODO(jyzhao): support rpc push and verification.
    destination = pusher_pb2.PushDestination()
    json_format.Parse(exec_properties["push_destination"], destination)
    destination_kind = destination.WhichOneof("destination")
    if destination_kind != "filesystem":
        raise NotImplementedError(
            "Invalid push destination {}".format(destination_kind)
        )
    fs_config = destination.filesystem
    versioning = fs_config.versioning
    if versioning == _Versioning.AUTO:
        versioning = _Versioning.UNIX_TIMESTAMP
    if versioning != _Versioning.UNIX_TIMESTAMP:
        raise NotImplementedError("Invalid Versioning {}".format(versioning))
    model_version = str(int(time.time()))
    logging.info("Model version: %s", model_version)
    serving_path = os.path.join(fs_config.base_directory, model_version)
    # Skip the copy when the destination already holds this version: the same
    # model may be blessed more than once, and we only need it pushed once.
    if tf.io.gfile.exists(serving_path):
        logging.info(
            "Destination directory %s already exists, skipping current push.",
            serving_path,
        )
    else:
        # tf.serving won't load partial model, it will retry until fully copied.
        io_utils.copy_dir(serving_model_dir, serving_path)
        logging.info("Model written to serving path %s.", serving_path)
    # Keep an archival copy under the output artifact, then record the push.
    io_utils.copy_dir(serving_model_dir, pushed_model.uri)
    self._MarkPushed(
        pushed_model, pushed_destination=serving_path, pushed_version=model_version
    )
    logging.info("Model pushed to %s.", pushed_model.uri)
|
def Do(
    self,
    input_dict: Dict[Text, List[types.Artifact]],
    output_dict: Dict[Text, List[types.Artifact]],
    exec_properties: Dict[Text, Any],
) -> None:
    """Push model to target directory if blessed.
    Args:
      input_dict: Input dict from input key to a list of artifacts, including:
        - model_export: exported model from trainer.
        - model_blessing: model blessing path from model_validator. A push
          action delivers the model exports produced by Trainer to the
          destination defined in component config.
      output_dict: Output dict from key to a list of artifacts, including:
        - model_push: A list of 'ModelPushPath' artifact of size one. It will
          include the model in this push execution if the model was pushed.
      exec_properties: A dict of execution properties, including:
        - push_destination: JSON string of pusher_pb2.PushDestination instance,
          providing instruction of destination to push model.
    Returns:
      None
    """
    self._log_startup(input_dict, output_dict, exec_properties)
    model_push = artifact_utils.get_single_instance(output_dict[PUSHED_MODEL_KEY])
    # Not blessed: flag the output artifact ("pushed" == 0) and skip the push.
    if not self.CheckBlessing(input_dict):
        model_push.set_int_custom_property("pushed", 0)
        return
    model_push_uri = model_push.uri
    model_export = artifact_utils.get_single_instance(input_dict[MODEL_KEY])
    model_export_uri = model_export.uri
    logging.info("Model pushing.")
    # Copy the model to pushing uri.
    model_path = path_utils.serving_model_path(model_export_uri)
    # NOTE(review): version is derived from the trainer's export directory
    # name — presumably a timestamp from the exporter; confirm in path_utils.
    model_version = path_utils.get_serving_model_version(model_export_uri)
    logging.info("Model version is %s", model_version)
    io_utils.copy_dir(model_path, os.path.join(model_push_uri, model_version))
    logging.info("Model written to %s.", model_push_uri)
    # Copied to a fixed outside path, which can be listened by model server.
    #
    # If model is already successfully copied to outside before, stop copying.
    # This is because model validator might blessed same model twice (check
    # mv driver) with different blessing output, we still want Pusher to
    # handle the mv output again to keep metadata tracking, but no need to
    # copy to outside path again..
    # TODO(jyzhao): support rpc push and verification.
    push_destination = pusher_pb2.PushDestination()
    json_format.Parse(exec_properties["push_destination"], push_destination)
    serving_path = os.path.join(
        push_destination.filesystem.base_directory, model_version
    )
    if tf.io.gfile.exists(serving_path):
        logging.info(
            "Destination directory %s already exists, skipping current push.",
            serving_path,
        )
    else:
        # tf.serving won't load partial model, it will retry until fully copied.
        io_utils.copy_dir(model_path, serving_path)
        logging.info("Model written to serving path %s.", serving_path)
    # Record success and provenance on the output artifact for metadata tracking.
    model_push.set_int_custom_property("pushed", 1)
    model_push.set_string_custom_property("pushed_model", model_export_uri)
    model_push.set_int_custom_property("pushed_model_id", model_export.id)
    logging.info("Model pushed to %s.", serving_path)
|
https://github.com/tensorflow/tfx/issues/1553
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-38-f67dc69712f4> in <module>
----> 1 context.run(pusher)
~/XXX/tfx/tfx/tfx/orchestration/experimental/interactive/interactive_context.py in run_if_ipython(*args, **kwargs)
64 # __IPYTHON__ variable is set by IPython, see
65 # https://ipython.org/ipython-doc/rel-0.10.2/html/interactive/reference.html#embedding-ipython.
---> 66 return fn(*args, **kwargs)
67 else:
68 absl.logging.warning(
~/XXX/tfx/tfx/tfx/orchestration/experimental/interactive/interactive_context.py in run(self, component, enable_cache, beam_pipeline_args)
166 component, pipeline_info, driver_args, metadata_connection,
167 beam_pipeline_args, additional_pipeline_args)
--> 168 execution_id = launcher.launch().execution_id
169
170 return execution_result.ExecutionResult(
~/XXX/tfx/tfx/tfx/orchestration/launcher/base_component_launcher.py in launch(self)
203 execution_decision.input_dict,
204 execution_decision.output_dict,
--> 205 execution_decision.exec_properties)
206
207 absl.logging.info('Running publisher for %s',
~/XXX/tfx/tfx/tfx/orchestration/launcher/in_process_component_launcher.py in _run_executor(self, execution_id, input_dict, output_dict, exec_properties)
65 executor_context) # type: ignore
66
---> 67 executor.Do(input_dict, output_dict, exec_properties)
~/XXX/tfx/tfx/tfx/extensions/google_cloud_big_query_ml/pusher/executor.py in Do(self, input_dict, output_dict, exec_properties)
59 """
60 self._log_startup(input_dict, output_dict, exec_properties)
---> 61 model_push = artifact_utils.get_single_instance(output_dict['model_push'])
62 if not self.CheckBlessing(input_dict):
63 model_push.set_int_custom_property('pushed', 0)
KeyError: 'model_push'
|
KeyError
|
def Do(
    self,
    input_dict: Dict[Text, List[types.Artifact]],
    output_dict: Dict[Text, List[types.Artifact]],
    exec_properties: Dict[Text, Any],
):
    """Overrides the tfx_pusher_executor.
    Args:
      input_dict: Input dict from input key to a list of artifacts, including:
        - model_export: exported model from trainer.
        - model_blessing: model blessing path from model_validator.
      output_dict: Output dict from key to a list of artifacts, including:
        - model_push: A list of 'ModelPushPath' artifact of size one. It will
          include the model in this push execution if the model was pushed.
      exec_properties: Mostly a passthrough input dict for
        tfx.components.Pusher.executor. custom_config.ai_platform_serving_args
        is consumed by this class. For the full set of parameters supported by
        Google Cloud AI Platform, refer to
        https://cloud.google.com/ml-engine/docs/tensorflow/deploying-models#creating_a_model_version.
    Raises:
      ValueError:
        If ai_platform_serving_args is not in exec_properties.custom_config.
        If Serving model path does not start with gs://.
      RuntimeError: if the Google Cloud AI Platform training job failed.
    """
    self._log_startup(input_dict, output_dict, exec_properties)
    model_push = artifact_utils.get_single_instance(
        output_dict[tfx_pusher_executor.PUSHED_MODEL_KEY]
    )
    # Not blessed: record the skip on the output artifact and stop.
    if not self.CheckBlessing(input_dict):
        self._MarkNotPushed(model_push)
        return
    model_export = artifact_utils.get_single_instance(
        input_dict[tfx_pusher_executor.MODEL_KEY]
    )
    exec_properties_copy = exec_properties.copy()
    custom_config = exec_properties_copy.pop(_CUSTOM_CONFIG_KEY, {})
    ai_platform_serving_args = custom_config.get(SERVING_ARGS_KEY)
    if not ai_platform_serving_args:
        raise ValueError("'ai_platform_serving_args' is missing in 'custom_config'")
    # Deploy the model.
    # Stage the serving model into the push artifact; deployment reads it
    # from that URI.
    io_utils.copy_dir(
        src=path_utils.serving_model_path(model_export.uri), dst=model_push.uri
    )
    model_path = model_push.uri
    # TODO(jjong): Introduce Versioning.
    # Note that we're adding "v" prefix as Cloud AI Prediction only allows the
    # version name that starts with letters, and contains letters, digits,
    # underscore only.
    model_version = "v{}".format(int(time.time()))
    # Forwarded so the runner can label the deployment for telemetry (see
    # deploy_model_for_aip_prediction).
    executor_class_path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
    runner.deploy_model_for_aip_prediction(
        model_path,
        model_version,
        ai_platform_serving_args,
        executor_class_path,
    )
    self._MarkPushed(
        model_push,
        pushed_destination=_CAIP_MODEL_VERSION_PATH_FORMAT.format(
            project_id=ai_platform_serving_args["project_id"],
            model=ai_platform_serving_args["model_name"],
            version=model_version,
        ),
        pushed_version=model_version,
    )
|
def Do(
    self,
    input_dict: Dict[Text, List[types.Artifact]],
    output_dict: Dict[Text, List[types.Artifact]],
    exec_properties: Dict[Text, Any],
):
    """Overrides the tfx_pusher_executor.
    Args:
      input_dict: Input dict from input key to a list of artifacts, including:
        - model_export: exported model from trainer.
        - model_blessing: model blessing path from model_validator.
      output_dict: Output dict from key to a list of artifacts, including:
        - model_push: A list of 'ModelPushPath' artifact of size one. It will
          include the model in this push execution if the model was pushed.
      exec_properties: Mostly a passthrough input dict for
        tfx.components.Pusher.executor. custom_config.ai_platform_serving_args
        is consumed by this class. For the full set of parameters supported by
        Google Cloud AI Platform, refer to
        https://cloud.google.com/ml-engine/docs/tensorflow/deploying-models#creating_a_model_version.
    Returns:
      None
    Raises:
      ValueError:
        If ai_platform_serving_args is not in exec_properties.custom_config.
        If Serving model path does not start with gs://.
      RuntimeError: if the Google Cloud AI Platform training job failed.
    """
    self._log_startup(input_dict, output_dict, exec_properties)
    model_push = artifact_utils.get_single_instance(
        output_dict[tfx_pusher_executor.PUSHED_MODEL_KEY]
    )
    # Not blessed: flag the output artifact and skip the deployment.
    if not self.CheckBlessing(input_dict):
        model_push.set_int_custom_property("pushed", 0)
        return
    model_export = artifact_utils.get_single_instance(
        input_dict[tfx_pusher_executor.MODEL_KEY]
    )
    model_export_uri = model_export.uri
    exec_properties_copy = exec_properties.copy()
    custom_config = exec_properties_copy.pop("custom_config", {})
    # Fix: use .get() so a missing key reaches the documented ValueError below
    # instead of surfacing as an opaque KeyError.
    ai_platform_serving_args = custom_config.get(SERVING_ARGS_KEY)
    if not ai_platform_serving_args:
        raise ValueError("'ai_platform_serving_args' is missing in 'custom_config'")
    # Deploy the model.
    model_path = path_utils.serving_model_path(model_export_uri)
    # Note: we do not have a logical model version right now. This
    # model_version is a timestamp mapped to trainer's exporter.
    model_version = os.path.basename(model_path)
    # Forwarded so the runner can label the deployment for telemetry.
    executor_class_path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
    runner.deploy_model_for_aip_prediction(
        model_path,
        model_version,
        ai_platform_serving_args,
        executor_class_path,
    )
    # Record success and the pushed model path on the output artifact.
    model_push.set_int_custom_property("pushed", 1)
    model_push.set_string_custom_property("pushed_model", model_path)
|
https://github.com/tensorflow/tfx/issues/1553
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-38-f67dc69712f4> in <module>
----> 1 context.run(pusher)
~/XXX/tfx/tfx/tfx/orchestration/experimental/interactive/interactive_context.py in run_if_ipython(*args, **kwargs)
64 # __IPYTHON__ variable is set by IPython, see
65 # https://ipython.org/ipython-doc/rel-0.10.2/html/interactive/reference.html#embedding-ipython.
---> 66 return fn(*args, **kwargs)
67 else:
68 absl.logging.warning(
~/XXX/tfx/tfx/tfx/orchestration/experimental/interactive/interactive_context.py in run(self, component, enable_cache, beam_pipeline_args)
166 component, pipeline_info, driver_args, metadata_connection,
167 beam_pipeline_args, additional_pipeline_args)
--> 168 execution_id = launcher.launch().execution_id
169
170 return execution_result.ExecutionResult(
~/XXX/tfx/tfx/tfx/orchestration/launcher/base_component_launcher.py in launch(self)
203 execution_decision.input_dict,
204 execution_decision.output_dict,
--> 205 execution_decision.exec_properties)
206
207 absl.logging.info('Running publisher for %s',
~/XXX/tfx/tfx/tfx/orchestration/launcher/in_process_component_launcher.py in _run_executor(self, execution_id, input_dict, output_dict, exec_properties)
65 executor_context) # type: ignore
66
---> 67 executor.Do(input_dict, output_dict, exec_properties)
~/XXX/tfx/tfx/tfx/extensions/google_cloud_big_query_ml/pusher/executor.py in Do(self, input_dict, output_dict, exec_properties)
59 """
60 self._log_startup(input_dict, output_dict, exec_properties)
---> 61 model_push = artifact_utils.get_single_instance(output_dict['model_push'])
62 if not self.CheckBlessing(input_dict):
63 model_push.set_int_custom_property('pushed', 0)
KeyError: 'model_push'
|
KeyError
|
def deploy_model_for_aip_prediction(
    serving_path: Text,
    model_version: Text,
    ai_platform_serving_args: Dict[Text, Any],
    executor_class_path: Text,
):
    """Deploys a model for serving with AI Platform.

    Flow: ensure the model resource exists (an HTTP 409 from the create call
    is treated as "already exists"), create a new version pointing at
    `serving_path`, poll the returned long-running operation until it is done,
    and set the freshly created version as the model's default.

    Args:
      serving_path: The path to the model. Must be a GCS URI.
      model_version: Version of the model being deployed. Must be different from
        what is currently being served.
      ai_platform_serving_args: Dictionary containing arguments for pushing to AI
        Platform. For the full set of parameters supported, refer to
        https://cloud.google.com/ml-engine/reference/rest/v1/projects.models.versions#Version
      executor_class_path: class path for TFX core default trainer.
    Raises:
      RuntimeError: if an error is encountered when trying to push.
    """
    absl.logging.info(
        "Deploying to model with version {} to AI Platform for serving: {}".format(
            model_version, ai_platform_serving_args
        )
    )
    model_name = ai_platform_serving_args["model_name"]
    project_id = ai_platform_serving_args["project_id"]
    regions = ai_platform_serving_args.get("regions", [])
    # Serving runtime/python versions are derived from the locally installed
    # TF version so serving matches the training environment.
    runtime_version = _get_tf_runtime_version(tf.__version__)
    python_version = _get_caip_python_version(runtime_version)
    api = discovery.build("ml", "v1")
    body = {"name": model_name, "regions": regions}
    parent = "projects/{}".format(project_id)
    try:
        api.projects().models().create(body=body, parent=parent).execute()
    except errors.HttpError as e:
        # If the error is to create an already existing model, it's ok to ignore.
        # TODO(b/135211463): Remove the disable once the pytype bug is fixed.
        if e.resp.status == 409:  # pytype: disable=attribute-error
            absl.logging.warn("Model {} already exists".format(model_name))
        else:
            raise RuntimeError("AI Platform Push failed: {}".format(e))
    # Record which TFX executor performed the push as labels on the version.
    with telemetry_utils.scoped_labels(
        {telemetry_utils.TFX_EXECUTOR: executor_class_path}
    ):
        job_labels = telemetry_utils.get_labels_dict()
    body = {
        "name": model_version,
        "deployment_uri": serving_path,
        "runtime_version": runtime_version,
        "python_version": python_version,
        "labels": job_labels,
    }
    # Push to AIP, and record the operation name so we can poll for its state.
    model_name = "projects/{}/models/{}".format(project_id, model_name)
    response = (
        api.projects()
        .models()
        .versions()
        .create(body=body, parent=model_name)
        .execute()
    )
    op_name = response["name"]
    # versions.create is a long-running operation: poll until 'done'.
    while True:
        deploy_status = api.projects().operations().get(name=op_name).execute()
        if deploy_status.get("done"):
            # Set the new version as default.
            api.projects().models().versions().setDefault(
                name="{}/versions/{}".format(
                    model_name, deploy_status["response"]["name"]
                )
            ).execute()
            break
        if "error" in deploy_status:
            # The operation completed with an error.
            absl.logging.error(deploy_status["error"])
            raise RuntimeError(
                "Failed to deploy model to AI Platform for serving: {}".format(
                    deploy_status["error"]
                )
            )
        time.sleep(_POLLING_INTERVAL_IN_SECONDS)
        absl.logging.info("Model still being deployed...")
    absl.logging.info(
        "Successfully deployed model {} with version {}, serving from {}".format(
            model_name, model_version, serving_path
        )
    )
|
def deploy_model_for_aip_prediction(
    serving_path: Text,
    model_version: Text,
    ai_platform_serving_args: Dict[Text, Any],
    executor_class_path: Text,
):
    """Deploys a model for serving with AI Platform.

    Flow: ensure the model resource exists (an HTTP 409 from the create call
    is treated as "already exists"), create a new version named
    "v<model_version>" pointing at `serving_path`, poll the returned
    long-running operation until done, and set the new version as default.

    Args:
      serving_path: The path to the model. Must be a GCS URI.
      model_version: Version of the model being deployed. Must be different from
        what is currently being served.
      ai_platform_serving_args: Dictionary containing arguments for pushing to AI
        Platform. For the full set of parameters supported, refer to
        https://cloud.google.com/ml-engine/reference/rest/v1/projects.models.versions#Version
      executor_class_path: class path for TFX core default trainer.
    Raises:
      RuntimeError: if an error is encountered when trying to push.
    """
    absl.logging.info(
        "Deploying to model with version {} to AI Platform for serving: {}".format(
            model_version, ai_platform_serving_args
        )
    )
    model_name = ai_platform_serving_args["model_name"]
    project_id = ai_platform_serving_args["project_id"]
    regions = ai_platform_serving_args.get("regions", [])
    # Serving runtime/python versions are derived from the locally installed
    # TF version so serving matches the training environment.
    runtime_version = _get_tf_runtime_version(tf.__version__)
    python_version = _get_caip_python_version(runtime_version)
    api = discovery.build("ml", "v1")
    body = {"name": model_name, "regions": regions}
    parent = "projects/{}".format(project_id)
    try:
        api.projects().models().create(body=body, parent=parent).execute()
    except errors.HttpError as e:
        # If the error is to create an already existing model, it's ok to ignore.
        # TODO(b/135211463): Remove the disable once the pytype bug is fixed.
        if e.resp.status == 409:  # pytype: disable=attribute-error
            absl.logging.warn("Model {} already exists".format(model_name))
        else:
            raise RuntimeError("AI Platform Push failed: {}".format(e))
    # Record which TFX executor performed the push as labels on the version.
    with telemetry_utils.scoped_labels(
        {telemetry_utils.TFX_EXECUTOR: executor_class_path}
    ):
        job_labels = telemetry_utils.get_labels_dict()
    body = {
        "name": "v{}".format(model_version),
        "deployment_uri": serving_path,
        "runtime_version": runtime_version,
        "python_version": python_version,
        "labels": job_labels,
    }
    # Push to AIP, and record the operation name so we can poll for its state.
    model_name = "projects/{}/models/{}".format(project_id, model_name)
    response = (
        api.projects()
        .models()
        .versions()
        .create(body=body, parent=model_name)
        .execute()
    )
    op_name = response["name"]
    # versions.create is a long-running operation: poll until 'done'.
    while True:
        deploy_status = api.projects().operations().get(name=op_name).execute()
        if deploy_status.get("done"):
            # Set the new version as default.
            api.projects().models().versions().setDefault(
                name="{}/versions/{}".format(
                    model_name, deploy_status["response"]["name"]
                )
            ).execute()
            break
        if "error" in deploy_status:
            # The operation completed with an error.
            absl.logging.error(deploy_status["error"])
            raise RuntimeError(
                "Failed to deploy model to AI Platform for serving: {}".format(
                    deploy_status["error"]
                )
            )
        time.sleep(_POLLING_INTERVAL_IN_SECONDS)
        absl.logging.info("Model still being deployed...")
    absl.logging.info(
        "Successfully deployed model {} with version {}, serving from {}".format(
            model_name, model_version, serving_path
        )
    )
|
https://github.com/tensorflow/tfx/issues/1553
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-38-f67dc69712f4> in <module>
----> 1 context.run(pusher)
~/XXX/tfx/tfx/tfx/orchestration/experimental/interactive/interactive_context.py in run_if_ipython(*args, **kwargs)
64 # __IPYTHON__ variable is set by IPython, see
65 # https://ipython.org/ipython-doc/rel-0.10.2/html/interactive/reference.html#embedding-ipython.
---> 66 return fn(*args, **kwargs)
67 else:
68 absl.logging.warning(
~/XXX/tfx/tfx/tfx/orchestration/experimental/interactive/interactive_context.py in run(self, component, enable_cache, beam_pipeline_args)
166 component, pipeline_info, driver_args, metadata_connection,
167 beam_pipeline_args, additional_pipeline_args)
--> 168 execution_id = launcher.launch().execution_id
169
170 return execution_result.ExecutionResult(
~/XXX/tfx/tfx/tfx/orchestration/launcher/base_component_launcher.py in launch(self)
203 execution_decision.input_dict,
204 execution_decision.output_dict,
--> 205 execution_decision.exec_properties)
206
207 absl.logging.info('Running publisher for %s',
~/XXX/tfx/tfx/tfx/orchestration/launcher/in_process_component_launcher.py in _run_executor(self, execution_id, input_dict, output_dict, exec_properties)
65 executor_context) # type: ignore
66
---> 67 executor.Do(input_dict, output_dict, exec_properties)
~/XXX/tfx/tfx/tfx/extensions/google_cloud_big_query_ml/pusher/executor.py in Do(self, input_dict, output_dict, exec_properties)
59 """
60 self._log_startup(input_dict, output_dict, exec_properties)
---> 61 model_push = artifact_utils.get_single_instance(output_dict['model_push'])
62 if not self.CheckBlessing(input_dict):
63 model_push.set_int_custom_property('pushed', 0)
KeyError: 'model_push'
|
KeyError
|
def Do(
    self,
    input_dict: Dict[Text, List[types.Artifact]],
    output_dict: Dict[Text, List[types.Artifact]],
    exec_properties: Dict[Text, Any],
):
    """Overrides the tfx_pusher_executor.

    Copies the blessed model into the push artifact's URI (which must be on
    GCS) and issues a BigQuery ML ``CREATE OR REPLACE MODEL`` query that
    imports it for serving, then marks the push artifact accordingly.

    Args:
      input_dict: Input dict from input key to a list of artifacts, including:
        - model_export: exported model from trainer.
        - model_blessing: model blessing path from model_validator.
      output_dict: Output dict from key to a list of artifacts, including:
        - model_push: A list of 'ModelPushPath' artifact of size one. It will
          include the model in this push execution if the model was pushed.
      exec_properties: Mostly a passthrough input dict for
        tfx.components.Pusher.executor. custom_config.bigquery_serving_args is
        consumed by this class. For the full set of parameters supported by
        Big Query ML, refer to https://cloud.google.com/bigquery-ml/
    Returns:
      None
    Raises:
      ValueError:
        If bigquery_serving_args is not in exec_properties.custom_config.
        If pipeline_root is not 'gs://...'
      RuntimeError: if the Big Query job failed.
    """
    self._log_startup(input_dict, output_dict, exec_properties)
    # Keys come from the base pusher executor so they stay in sync with the
    # component spec (avoids hard-coded key mismatches).
    model_push = artifact_utils.get_single_instance(
        output_dict[tfx_pusher_executor.PUSHED_MODEL_KEY]
    )
    if not self.CheckBlessing(input_dict):
        # Model was not blessed: record a non-push and stop.
        self._MarkNotPushed(model_push)
        return
    model_export = artifact_utils.get_single_instance(
        input_dict[tfx_pusher_executor.MODEL_KEY]
    )
    model_export_uri = model_export.uri
    custom_config = exec_properties.get(_CUSTOM_CONFIG_KEY, {})
    bigquery_serving_args = custom_config.get(SERVING_ARGS_KEY)
    # if configuration is missing error out
    if bigquery_serving_args is None:
        raise ValueError("Big Query ML configuration was not provided")
    # Fully-qualified BigQuery model id: project.dataset.model_name.
    bq_model_uri = ".".join(
        [
            bigquery_serving_args[_PROJECT_ID_KEY],
            bigquery_serving_args[_BQ_DATASET_ID_KEY],
            bigquery_serving_args[_MODEL_NAME_KEY],
        ]
    )
    # Deploy the model.
    io_utils.copy_dir(
        src=path_utils.serving_model_path(model_export_uri), dst=model_push.uri
    )
    model_path = model_push.uri
    # BigQuery ML can only import models from GCS.
    if not model_path.startswith(_GCS_PREFIX):
        raise ValueError("pipeline_root must be gs:// for BigQuery ML Pusher.")
    logging.info(
        "Deploying the model to BigQuery ML for serving: %s from %s",
        bigquery_serving_args,
        model_path,
    )
    query = _BQML_CREATE_OR_REPLACE_MODEL_QUERY_TEMPLATE.format(
        model_uri=bq_model_uri, model_path=model_path
    )
    # TODO(zhitaoli): Refactor the executor_class_path creation into a common
    # utility function.
    executor_class_path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
    # Label the BigQuery job with the executor identity for telemetry.
    with telemetry_utils.scoped_labels(
        {telemetry_utils.TFX_EXECUTOR: executor_class_path}
    ):
        default_query_job_config = bigquery.job.QueryJobConfig(
            labels=telemetry_utils.get_labels_dict()
        )
    client = bigquery.Client(default_query_job_config=default_query_job_config)
    try:
        query_job = client.query(query)
        query_job.result()  # Waits for the query to finish
    except Exception as e:
        raise RuntimeError("BigQuery ML Push failed: {}".format(e))
    logging.info(
        "Successfully deployed model %s serving from %s", bq_model_uri, model_path
    )
    # Setting the push_destination to bigquery uri
    self._MarkPushed(model_push, pushed_destination=bq_model_uri)
|
def Do(
    self,
    input_dict: Dict[Text, List[types.Artifact]],
    output_dict: Dict[Text, List[types.Artifact]],
    exec_properties: Dict[Text, Any],
):
    """Overrides the tfx_pusher_executor.

    Issues a BigQuery ML ``CREATE OR REPLACE MODEL`` query that imports the
    blessed TensorFlow model (which must live on GCS) for serving, then
    records push status on the push artifact.

    Args:
      input_dict: Input dict from input key to a list of artifacts, including:
        - model_export: exported model from trainer.
        - model_blessing: model blessing path from model_validator.
      output_dict: Output dict from key to a list of artifacts, including:
        - model_push: A list of 'ModelPushPath' artifact of size one. It will
          include the model in this push execution if the model was pushed.
      exec_properties: Mostly a passthrough input dict for
        tfx.components.Pusher.executor. custom_config.bigquery_serving_args is
        consumed by this class. For the full set of parameters supported by
        Big Query ML, refer to https://cloud.google.com/bigquery-ml/
    Returns:
      None
    Raises:
      ValueError:
        If bigquery_serving_args is not in exec_properties.custom_config.
        If pipeline_root is not 'gs://...'
      RuntimeError: if the Big Query job failed.
    """
    self._log_startup(input_dict, output_dict, exec_properties)
    # NOTE(review): these dict keys are hard-coded; they must match the keys
    # the component spec declares. tensorflow/tfx#1553 reports a
    # KeyError: 'model_push' at exactly this line when the component uses a
    # different key — confirm against the base pusher executor's key
    # constants before relying on these literals.
    model_push = artifact_utils.get_single_instance(output_dict["model_push"])
    if not self.CheckBlessing(input_dict):
        # Model was not blessed: record a non-push and stop.
        model_push.set_int_custom_property("pushed", 0)
        return
    model_export = artifact_utils.get_single_instance(input_dict["model_export"])
    model_export_uri = model_export.uri
    custom_config = exec_properties.get("custom_config", {})
    bigquery_serving_args = custom_config.get("bigquery_serving_args", None)
    # if configuration is missing error out
    if bigquery_serving_args is None:
        raise ValueError("Big Query ML configuration was not provided")
    # Fully-qualified, backtick-quoted BigQuery model id.
    bq_model_uri = "`{}`.`{}`.`{}`".format(
        bigquery_serving_args["project_id"],
        bigquery_serving_args["bq_dataset_id"],
        bigquery_serving_args["model_name"],
    )
    # Deploy the model.
    model_path = path_utils.serving_model_path(model_export_uri)
    # BigQuery ML can only import models from GCS.
    if not model_path.startswith("gs://"):
        raise ValueError("pipeline_root must be gs:// for BigQuery ML Pusher.")
    absl.logging.info(
        "Deploying the model to BigQuery ML for serving: {} from {}".format(
            bigquery_serving_args, model_path
        )
    )
    query = """
    CREATE OR REPLACE MODEL {}
    OPTIONS (model_type='tensorflow',
    model_path='{}')""".format(bq_model_uri, os.path.join(model_path, "*"))
    # TODO(zhitaoli): Refactor the executor_class_path creation into a common
    # utility function.
    executor_class_path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
    # Label the BigQuery job with the executor identity for telemetry.
    with telemetry_utils.scoped_labels(
        {telemetry_utils.TFX_EXECUTOR: executor_class_path}
    ):
        default_query_job_config = bigquery.job.QueryJobConfig(
            labels=telemetry_utils.get_labels_dict()
        )
    client = bigquery.Client(default_query_job_config=default_query_job_config)
    try:
        query_job = client.query(query)
        query_job.result()  # Waits for the query to finish
    except Exception as e:
        raise RuntimeError("BigQuery ML Push failed: {}".format(e))
    absl.logging.info(
        "Successfully deployed model {} serving from {}".format(
            bq_model_uri, model_path
        )
    )
    # Setting the push_destination to bigquery uri
    model_push.set_int_custom_property("pushed", 1)
    model_push.set_string_custom_property("pushed_model", bq_model_uri)
|
https://github.com/tensorflow/tfx/issues/1553
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-38-f67dc69712f4> in <module>
----> 1 context.run(pusher)
~/XXX/tfx/tfx/tfx/orchestration/experimental/interactive/interactive_context.py in run_if_ipython(*args, **kwargs)
64 # __IPYTHON__ variable is set by IPython, see
65 # https://ipython.org/ipython-doc/rel-0.10.2/html/interactive/reference.html#embedding-ipython.
---> 66 return fn(*args, **kwargs)
67 else:
68 absl.logging.warning(
~/XXX/tfx/tfx/tfx/orchestration/experimental/interactive/interactive_context.py in run(self, component, enable_cache, beam_pipeline_args)
166 component, pipeline_info, driver_args, metadata_connection,
167 beam_pipeline_args, additional_pipeline_args)
--> 168 execution_id = launcher.launch().execution_id
169
170 return execution_result.ExecutionResult(
~/XXX/tfx/tfx/tfx/orchestration/launcher/base_component_launcher.py in launch(self)
203 execution_decision.input_dict,
204 execution_decision.output_dict,
--> 205 execution_decision.exec_properties)
206
207 absl.logging.info('Running publisher for %s',
~/XXX/tfx/tfx/tfx/orchestration/launcher/in_process_component_launcher.py in _run_executor(self, execution_id, input_dict, output_dict, exec_properties)
65 executor_context) # type: ignore
66
---> 67 executor.Do(input_dict, output_dict, exec_properties)
~/XXX/tfx/tfx/tfx/extensions/google_cloud_big_query_ml/pusher/executor.py in Do(self, input_dict, output_dict, exec_properties)
59 """
60 self._log_startup(input_dict, output_dict, exec_properties)
---> 61 model_push = artifact_utils.get_single_instance(output_dict['model_push'])
62 if not self.CheckBlessing(input_dict):
63 model_push.set_int_custom_property('pushed', 0)
KeyError: 'model_push'
|
KeyError
|
def deploy_model_for_cmle_serving(
    serving_path: Text, model_version: Text, cmle_serving_args: Dict[Text, Any]
):
    """Deploys a model for serving with CMLE.

    Flow: ensure the model resource exists (an HTTP 409 from the create call
    is treated as "already exists"), create a version "v<model_version>"
    pointing at `serving_path`, and poll the long-running operation until it
    completes.

    Args:
      serving_path: The path to the model. Must be a GCS URI.
      model_version: Version of the model being deployed. Must be different from
        what is currently being served.
      cmle_serving_args: Dictionary containing arguments for pushing to CMLE. For
        the full set of parameters supported, refer to
        https://cloud.google.com/ml-engine/reference/rest/v1/projects.models.versions#Version
    Raises:
      RuntimeError: if an error is encountered when trying to push.
    """
    tf.logging.info(
        "Deploying to model with version {} to CMLE for serving: {}".format(
            model_version, cmle_serving_args
        )
    )
    model_name = cmle_serving_args["model_name"]
    project_id = cmle_serving_args["project_id"]
    runtime_version = _get_tf_runtime_version()
    python_version = _get_caip_python_version()
    api = discovery.build("ml", "v1")
    body = {"name": model_name}
    parent = "projects/{}".format(project_id)
    try:
        api.projects().models().create(body=body, parent=parent).execute()
    except errors.HttpError as e:
        # If the error is to create an already existing model, it's ok to ignore.
        # TODO(b/135211463): Remove the disable once the pytype bug is fixed.
        if e.resp.status == 409:  # pytype: disable=attribute-error
            tf.logging.warn("Model {} already exists".format(model_name))
        else:
            raise RuntimeError("CMLE Push failed: {}".format(e))
    body = {
        "name": "v{}".format(model_version),
        "deployment_uri": serving_path,
        "runtime_version": runtime_version,
        "python_version": python_version,
    }
    # Push to CMLE, and record the operation name so we can poll for its state.
    model_name = "projects/{}/models/{}".format(project_id, model_name)
    response = (
        api.projects()
        .models()
        .versions()
        .create(body=body, parent=model_name)
        .execute()
    )
    op_name = response["name"]
    # versions.create is a long-running operation: poll until 'done'.
    while True:
        deploy_status = api.projects().operations().get(name=op_name).execute()
        if deploy_status.get("done"):
            break
        if "error" in deploy_status:
            # The operation completed with an error.
            tf.logging.error(deploy_status["error"])
            raise RuntimeError(
                "Failed to deploy model to CMLE for serving: {}".format(
                    deploy_status["error"]
                )
            )
        time.sleep(_POLLING_INTERVAL_IN_SECONDS)
        tf.logging.info("Model still being deployed...")
    tf.logging.info(
        "Successfully deployed model {} with version {}, serving from {}".format(
            model_name, model_version, serving_path
        )
    )
|
def deploy_model_for_cmle_serving(
    serving_path: Text, model_version: Text, cmle_serving_args: Dict[Text, Any]
):
    """Deploys a model for serving with CMLE.

    Flow: ensure the model resource exists (an HTTP 409 from the create call
    is treated as "already exists"), create a version "v<model_version>"
    pointing at `serving_path`, and poll the long-running operation until it
    completes.

    Args:
      serving_path: The path to the model. Must be a GCS URI.
      model_version: Version of the model being deployed. Must be different from
        what is currently being served.
      cmle_serving_args: Dictionary containing arguments for pushing to CMLE. For
        the full set of parameters supported, refer to
        https://cloud.google.com/ml-engine/reference/rest/v1/projects.models.versions#Version
    Raises:
      RuntimeError: if an error is encountered when trying to push.
    """
    tf.logging.info(
        "Deploying to model with version {} to CMLE for serving: {}".format(
            model_version, cmle_serving_args
        )
    )
    model_name = cmle_serving_args["model_name"]
    project_id = cmle_serving_args["project_id"]
    runtime_version = _get_tf_runtime_version()
    python_version = _get_caip_python_version()
    api = discovery.build("ml", "v1")
    body = {"name": model_name}
    parent = "projects/{}".format(project_id)
    try:
        api.projects().models().create(body=body, parent=parent).execute()
    except errors.HttpError as e:
        # If the error is to create an already existing model, it's ok to ignore.
        # BUG FIX: e.resp.status is an int (httplib2 Response.status), so the
        # previous comparison against the string "409" never matched and a
        # benign "model already exists" 409 escalated into a RuntimeError.
        # TODO(b/135211463): Remove the disable once the pytype bug is fixed.
        if e.resp.status == 409:  # pytype: disable=attribute-error
            tf.logging.warn("Model {} already exists".format(model_name))
        else:
            raise RuntimeError("CMLE Push failed: {}".format(e))
    body = {
        "name": "v{}".format(model_version),
        "deployment_uri": serving_path,
        "runtime_version": runtime_version,
        "python_version": python_version,
    }
    # Push to CMLE, and record the operation name so we can poll for its state.
    model_name = "projects/{}/models/{}".format(project_id, model_name)
    response = (
        api.projects()
        .models()
        .versions()
        .create(body=body, parent=model_name)
        .execute()
    )
    op_name = response["name"]
    # versions.create is a long-running operation: poll until 'done'.
    while True:
        deploy_status = api.projects().operations().get(name=op_name).execute()
        if deploy_status.get("done"):
            break
        if "error" in deploy_status:
            # The operation completed with an error.
            tf.logging.error(deploy_status["error"])
            raise RuntimeError(
                "Failed to deploy model to CMLE for serving: {}".format(
                    deploy_status["error"]
                )
            )
        time.sleep(_POLLING_INTERVAL_IN_SECONDS)
        tf.logging.info("Model still being deployed...")
    tf.logging.info(
        "Successfully deployed model {} with version {}, serving from {}".format(
            model_name, model_version, serving_path
        )
    )
|
https://github.com/tensorflow/tfx/issues/712
|
Traceback (most recent call last):
File "/opt/venv/lib/python3.6/site-packages/tfx/extensions/google_cloud_ai_platform/runner.py", line 176, in deploy_model_for_cmle_serving
api.projects().models().create(body=body, parent=parent).execute()
File "/opt/venv/lib/python3.6/site-packages/googleapiclient/_helpers.py", line 130, in positional_wrapper
return wrapped(*args, **kwargs)
File "/opt/venv/lib/python3.6/site-packages/googleapiclient/http.py", line 856, in execute
raise HttpError(resp, content, uri=self.uri)
googleapiclient.errors.HttpError: <HttpError 409 when requesting
https://ml.googleapis.com/v1/projects/xxxxxxxx/models?alt=json
returned "Field: model.name Error: A model with the same name already exists.". Details: "[{'@type': 'type.googleapis.com/google.rpc.BadRequest', 'fieldViolations': [{'field': 'model.name', 'description': 'A model with the same name already exists.'}]}]">
|
googleapiclient.errors.HttpError
|
def _create_pipeline():
    """Implements the chicago taxi pipeline with TFX.

    Wires the standard TFX components (example gen -> stats -> schema ->
    validation -> transform -> trainer -> evaluator -> model validator ->
    pusher) over BigQuery source data and returns them as a list.
    """
    # Sampled training query against the public Chicago taxi dataset;
    # RAND() < _query_sample_rate downsamples rows at read time.
    query = """
          SELECT
            pickup_community_area,
            fare,
            EXTRACT(MONTH FROM trip_start_timestamp) AS trip_start_month,
            EXTRACT(HOUR FROM trip_start_timestamp) AS trip_start_hour,
            EXTRACT(DAYOFWEEK FROM trip_start_timestamp) AS trip_start_day,
            UNIX_SECONDS(trip_start_timestamp) AS trip_start_timestamp,
            pickup_latitude,
            pickup_longitude,
            dropoff_latitude,
            dropoff_longitude,
            trip_miles,
            pickup_census_tract,
            dropoff_census_tract,
            payment_type,
            company,
            trip_seconds,
            dropoff_community_area,
            tips
          FROM `bigquery-public-data.chicago_taxi_trips.taxi_trips`
          WHERE RAND() < {}""".format(_query_sample_rate)
    # Brings data into the pipeline or otherwise joins/converts training data.
    example_gen = BigQueryExampleGen(query=query)
    # Computes statistics over data for visualization and example validation.
    statistics_gen = StatisticsGen(input_data=example_gen.outputs.examples)
    # Generates schema based on statistics files.
    infer_schema = SchemaGen(stats=statistics_gen.outputs.output)
    # Performs anomaly detection based on statistics and data schema.
    validate_stats = ExampleValidator(
        stats=statistics_gen.outputs.output, schema=infer_schema.outputs.output
    )
    # Performs transformations and feature engineering in training and serving.
    transform = Transform(
        input_data=example_gen.outputs.examples,
        schema=infer_schema.outputs.output,
        module_file=_taxi_utils,
    )
    # Uses user-provided Python function that implements a model using TF-Learn.
    # custom_config routes the training job to Cloud ML Engine.
    trainer = Trainer(
        module_file=_taxi_utils,
        transformed_examples=transform.outputs.transformed_examples,
        schema=infer_schema.outputs.output,
        transform_output=transform.outputs.transform_output,
        train_args=trainer_pb2.TrainArgs(num_steps=10000),
        eval_args=trainer_pb2.EvalArgs(num_steps=5000),
        custom_config={"cmle_training_args": _cmle_training_args},
    )
    # Uses TFMA to compute a evaluation statistics over features of a model,
    # sliced by trip_start_hour.
    model_analyzer = Evaluator(
        examples=example_gen.outputs.examples,
        model_exports=trainer.outputs.output,
        feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(
            specs=[
                evaluator_pb2.SingleSlicingSpec(column_for_slicing=["trip_start_hour"])
            ]
        ),
    )
    # Performs quality validation of a candidate model (compared to a baseline).
    model_validator = ModelValidator(
        examples=example_gen.outputs.examples, model=trainer.outputs.output
    )
    # Checks whether the model passed the validation steps and pushes the model
    # to a file destination if check passed.
    # custom_config additionally targets CMLE serving.
    pusher = Pusher(
        model_export=trainer.outputs.output,
        model_blessing=model_validator.outputs.blessing,
        custom_config={"cmle_serving_args": _cmle_serving_args},
        push_destination=pusher_pb2.PushDestination(
            filesystem=pusher_pb2.PushDestination.Filesystem(
                base_directory=_serving_model_dir
            )
        ),
    )
    # Returned in dependency order; the runner builds the DAG from the
    # components' artifact producer/consumer relationships.
    return [
        example_gen,
        statistics_gen,
        infer_schema,
        validate_stats,
        transform,
        trainer,
        model_analyzer,
        model_validator,
        pusher,
    ]
|
def _create_pipeline():
"""Implements the chicago taxi pipeline with TFX."""
query = """
SELECT
pickup_community_area,
fare,
EXTRACT(MONTH FROM trip_start_timestamp) AS trip_start_month,
EXTRACT(HOUR FROM trip_start_timestamp) AS trip_start_hour,
EXTRACT(DAYOFWEEK FROM trip_start_timestamp) AS trip_start_day,
UNIX_SECONDS(trip_start_timestamp) AS trip_start_timestamp,
pickup_latitude,
pickup_longitude,
dropoff_latitude,
dropoff_longitude,
trip_miles,
pickup_census_tract,
dropoff_census_tract,
payment_type,
company,
trip_seconds,
dropoff_community_area,
tips
FROM `bigquery-public-data.chicago_taxi_trips.taxi_trips`
WHERE RAND() < {}""".format(_query_sample_rate)
# Brings data into the pipeline or otherwise joins/converts training data.
example_gen = BigQueryExampleGen(query=query)
# Computes statistics over data for visualization and example validation.
statistics_gen = StatisticsGen(input_data=example_gen.outputs.examples)
# Generates schema based on statistics files.
infer_schema = SchemaGen(stats=statistics_gen.outputs.output)
# Performs anomaly detection based on statistics and data schema.
validate_stats = ExampleValidator(
stats=statistics_gen.outputs.output, schema=infer_schema.outputs.output
)
# Performs transformations and feature engineering in training and serving.
transform = Transform(
input_data=example_gen.outputs.examples,
schema=infer_schema.outputs.output,
module_file=_taxi_utils,
)
# Uses user-provided Python function that implements a model using TF-Learn.
trainer = Trainer(
module_file=_taxi_utils,
transformed_examples=transform.outputs.transformed_examples,
schema=infer_schema.outputs.output,
transform_output=transform.outputs.transform_output,
train_args=trainer_pb2.TrainArgs(num_steps=10000),
eval_args=trainer_pb2.EvalArgs(num_steps=5000),
custom_config={"cmle_training_args": _cmle_training_args},
)
# Uses TFMA to compute a evaluation statistics over features of a model.
model_analyzer = Evaluator(
examples=example_gen.outputs.examples,
model_exports=trainer.outputs.output,
feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(
specs=[
evaluator_pb2.SingleSlicingSpec(column_for_slicing=["trip_start_hour"])
]
),
)
# Performs quality validation of a candidate model (compared to a baseline).
model_validator = ModelValidator(
examples=example_gen.outputs.examples, model=trainer.outputs.output
)
# Checks whether the model passed the validation steps and pushes the model
# to a file destination if check passed.
pusher = Pusher(
model_export=trainer.outputs.output,
model_blessing=model_validator.outputs.blessing,
custom_config={"cmle_serving_args": _cmle_serving_args},
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=_serving_model_dir
)
),
)
return pipeline.Pipeline(
pipeline_name="chicago_taxi_pipeline_kubeflow",
pipeline_root=_pipeline_root,
components=[
example_gen,
statistics_gen,
infer_schema,
validate_stats,
transform,
trainer,
model_analyzer,
model_validator,
pusher,
],
log_root="/var/tmp/tfx/logs",
additional_pipeline_args={
"beam_pipeline_args": [
"--runner=DataflowRunner",
"--experiments=shuffle_mode=auto",
"--project=" + _project_id,
"--temp_location=" + os.path.join(_output_bucket, "tmp"),
"--region=" + _gcp_region,
],
# Optional args:
# 'tfx_image': custom docker image to use for components.
},
)
|
https://github.com/tensorflow/tfx/issues/57
|
[2019-04-25 18:57:19,644] {__init__.py:416} ERROR - Failed to import: /home/j3soon/airflow/dags/taxi_pipeline_solution.py
Traceback (most recent call last):
File "/home/j3soon/tfx-env/local/lib/python2.7/site-packages/airflow/models/__init__.py", line 413, in process_file
m = imp.load_source(mod_name, filepath)
File "/home/j3soon/airflow/dags/taxi_pipeline_solution.py", line 147, in <module>
taxi_pipeline = AirflowDAGRunner(_airflow_config).run(_create_pipeline())
File "/home/j3soon/tfx-env/local/lib/python2.7/site-packages/tfx/orchestration/airflow/airflow_runner.py", line 45, in run
airflow_dag = airflow_pipeline.AirflowPipeline(**self._config)
TypeError: __init__() got an unexpected keyword argument 'components'
Running the Gunicorn Server with:
Workers: 4 sync
Host: 0.0.0.0:8080
Timeout: 120
Logfiles: - -
|
TypeError
|
def _create_pipeline():
    """Implements the chicago taxi pipeline with TFX.

    Builds the standard component chain over a local CSV source and returns
    the components as a list, in dependency order.
    """
    csv_data = csv_input(_data_root)
    # Ingest: convert the CSV rows into tf.Examples for downstream components.
    ingest = CsvExampleGen(input_base=csv_data)
    # Dataset statistics, used both for visualization and for validation.
    stats = StatisticsGen(input_data=ingest.outputs.examples)
    # Infer a schema from the computed statistics.
    schema = SchemaGen(stats=stats.outputs.output)
    # Flag anomalies by checking the statistics against the inferred schema.
    anomaly_check = ExampleValidator(
        stats=stats.outputs.output, schema=schema.outputs.output
    )
    # Feature engineering shared between training and serving.
    feature_transform = Transform(
        input_data=ingest.outputs.examples,
        schema=schema.outputs.output,
        module_file=_taxi_module_file,
    )
    # Train the user-defined TF-Learn model from the module file.
    model_trainer = Trainer(
        module_file=_taxi_module_file,
        transformed_examples=feature_transform.outputs.transformed_examples,
        schema=schema.outputs.output,
        transform_output=feature_transform.outputs.transform_output,
        train_args=trainer_pb2.TrainArgs(num_steps=10000),
        eval_args=trainer_pb2.EvalArgs(num_steps=5000),
    )
    # TFMA evaluation, sliced by trip_start_hour.
    evaluator = Evaluator(
        examples=ingest.outputs.examples,
        model_exports=model_trainer.outputs.output,
        feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(
            specs=[
                evaluator_pb2.SingleSlicingSpec(column_for_slicing=["trip_start_hour"])
            ]
        ),
    )
    # Gate the push: compare the candidate model against the current baseline.
    validator = ModelValidator(
        examples=ingest.outputs.examples, model=model_trainer.outputs.output
    )
    # Push the model to the serving directory if it was blessed.
    model_pusher = Pusher(
        model_export=model_trainer.outputs.output,
        model_blessing=validator.outputs.blessing,
        push_destination=pusher_pb2.PushDestination(
            filesystem=pusher_pb2.PushDestination.Filesystem(
                base_directory=_serving_model_dir
            )
        ),
    )
    return [
        ingest,
        stats,
        schema,
        anomaly_check,
        feature_transform,
        model_trainer,
        evaluator,
        validator,
        model_pusher,
    ]
|
def _create_pipeline():
    """Implements the chicago taxi pipeline with TFX."""
    # Raw training data as a CSV input channel.
    csv_data = csv_input(_data_root)
    # Ingests the CSV data into the pipeline.
    gen = CsvExampleGen(input_base=csv_data)
    # Computes statistics over data for visualization and example validation.
    stats = StatisticsGen(input_data=gen.outputs.examples)
    # Infers a schema from the computed statistics.
    schema = SchemaGen(stats=stats.outputs.output)
    # Detects anomalies based on the statistics and the inferred schema.
    stats_validator = ExampleValidator(
        stats=stats.outputs.output, schema=schema.outputs.output
    )
    # Feature engineering applied to both training and serving data.
    preprocess = Transform(
        input_data=gen.outputs.examples,
        schema=schema.outputs.output,
        module_file=_taxi_module_file,
    )
    # Trains a TF-Learn model via the user-provided module file.
    train = Trainer(
        module_file=_taxi_module_file,
        transformed_examples=preprocess.outputs.transformed_examples,
        schema=schema.outputs.output,
        transform_output=preprocess.outputs.transform_output,
        train_args=trainer_pb2.TrainArgs(num_steps=10000),
        eval_args=trainer_pb2.EvalArgs(num_steps=5000),
    )
    # Computes evaluation statistics, sliced by trip_start_hour.
    evaluator = Evaluator(
        examples=gen.outputs.examples,
        model_exports=train.outputs.output,
        feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(
            specs=[
                evaluator_pb2.SingleSlicingSpec(column_for_slicing=["trip_start_hour"])
            ]
        ),
    )
    # Compares the candidate model against a baseline for quality.
    model_checker = ModelValidator(
        examples=gen.outputs.examples, model=train.outputs.output
    )
    # Pushes the model to the serving directory when it passes validation.
    push = Pusher(
        model_export=train.outputs.output,
        model_blessing=model_checker.outputs.blessing,
        push_destination=pusher_pb2.PushDestination(
            filesystem=pusher_pb2.PushDestination.Filesystem(
                base_directory=_serving_model_dir
            )
        ),
    )
    # Assemble the DAG in dependency order and wrap it in a Pipeline.
    components = [
        gen,
        stats,
        schema,
        stats_validator,
        preprocess,
        train,
        evaluator,
        model_checker,
        push,
    ]
    return pipeline.Pipeline(
        pipeline_name="chicago_taxi_simple",
        pipeline_root=_pipeline_root,
        components=components,
        enable_cache=True,
        metadata_db_root=_metadata_db_root,
        additional_pipeline_args={"logger_args": logger_overrides},
    )
|
https://github.com/tensorflow/tfx/issues/57
|
[2019-04-25 18:57:19,644] {__init__.py:416} ERROR - Failed to import: /home/j3soon/airflow/dags/taxi_pipeline_solution.py
Traceback (most recent call last):
File "/home/j3soon/tfx-env/local/lib/python2.7/site-packages/airflow/models/__init__.py", line 413, in process_file
m = imp.load_source(mod_name, filepath)
File "/home/j3soon/airflow/dags/taxi_pipeline_solution.py", line 147, in <module>
taxi_pipeline = AirflowDAGRunner(_airflow_config).run(_create_pipeline())
File "/home/j3soon/tfx-env/local/lib/python2.7/site-packages/tfx/orchestration/airflow/airflow_runner.py", line 45, in run
airflow_dag = airflow_pipeline.AirflowPipeline(**self._config)
TypeError: __init__() got an unexpected keyword argument 'components'
Running the Gunicorn Server with:
Workers: 4 sync
Host: 0.0.0.0:8080
Timeout: 120
Logfiles: - -
|
TypeError
|
def __str__(self) -> str:
    """Return the error message shown to the user."""
    message = "Unacceptable field format, use **provider.method**."
    return message
|
def __str__(self) -> str:
    """Return the error message shown to the user.

    Fixes the "Filed" typo so the message reads correctly.
    """
    return "Undefined field. Field cannot be None."
|
https://github.com/lk-geimfari/mimesis/issues/619
|
from mimesis.schema import Field, Schema
_ = Field('en')
_('choice', items=[1,2,3])
Traceback (most recent call last):
File "C:\dev\fakemimesis\.pyenv\lib\site-packages\mimesis\schema.py", line 100, in __call__
result = self._table[name](**kwargs)
KeyError: 'choice'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<pyshell#24>", line 1, in <module>
_('choice', items=[1,2,3])
File "C:\dev\fakemimesis\.pyenv\lib\site-packages\mimesis\schema.py", line 105, in __call__
raise UnsupportedField(name)
mimesis.exceptions.UnsupportedField: Field «choice» is not supported.
|
KeyError
|
def __call__(
    self, name: Optional[str] = None, key: Optional[Callable] = None, **kwargs
) -> Any:
    """Override standard call.
    This magic method override standard call so it's take any string which
    represents name of the any method of any supported data provider
    and the ``**kwargs`` of this method.
    ..note:: Some data providers have methods with the same name and
    in such cases, you can explicitly define that the method belongs to
    data-provider ``name='provider.name'``.
    You can apply a *key function* to result returned by the method,
    to do it, just pass parameter **key** with a callable object which
    returns final result.
    :param name: Name of the method.
    :param key: A key function (or other callable object)
    which will be applied to result.
    :param kwargs: Kwargs of method.
    :return: Value which represented by method.
    :raises ValueError: if provider is not
    supported or if field is not defined.
    """
    # A field name is mandatory; fail fast when it is missing.
    if name is None:
        raise UndefinedField()
    def tail_parser(tails: str, obj: Any) -> Any:
        """Return method from end of tail.
        :param tails: Tail string
        :param obj: Search tail from this object
        :return last tailed method
        """
        provider_name, method_name = tails.split(".", 1)
        # Only a single "provider.method" level is accepted.
        if "." in method_name:
            raise UnacceptableField()
        attr = getattr(obj, provider_name)
        # NOTE(review): returns None implicitly when attr is None —
        # presumably later cached and caught as UnsupportedField; confirm.
        if attr is not None:
            return getattr(attr, method_name)
    try:
        # Resolve the method once and cache it in self._table keyed by name.
        if name not in self._table:
            if "." not in name:
                # Fix https://github.com/lk-geimfari/mimesis/issues/619
                # 'choice' lives directly on the generator, not on a provider.
                if name == self._gen.choice.Meta.name:
                    self._table[name] = self._gen.choice
                else:
                    # Scan every provider attached to the generator.
                    for provider in dir(self._gen):
                        provider = getattr(self._gen, provider)
                        if name in dir(provider):
                            self._table[name] = getattr(provider, name)
            else:
                # Explicit "provider.method" form.
                self._table[name] = tail_parser(name, self._gen)
        result = self._table[name](**kwargs)
        # Optionally post-process the result with the user-supplied key.
        if key and callable(key):
            return key(result)
        return result
    except KeyError:
        # The name never landed in the cache: the field is unknown.
        raise UnsupportedField(name)
|
def __call__(
    self, name: Optional[str] = None, key: Optional[Callable] = None, **kwargs
) -> Any:
    """Override standard call.
    This magic method override standard call so it's take any string which
    represents name of the any method of any supported data provider
    and the ``**kwargs`` of this method.
    ..note:: Some data providers have methods with the same name and
    in such cases, you can explicitly define that the method belongs to
    data-provider ``name='provider.name'``.
    You can apply a *key function* to result returned by the method,
    to do it, just pass parameter **key** with a callable object which
    returns final result.
    :param name: Name of the method.
    :param key: A key function (or other callable object)
    which will be applied to result.
    :param kwargs: Kwargs of method.
    :return: Value which represented by method.
    :raises ValueError: if provider is not
    supported or if field is not defined.
    """
    if name is None:
        raise UndefinedField()
    def tail_parser(tails: str, obj: Any) -> Any:
        """Return method from end of tail.
        :param tails: Tail string
        :param obj: Search tail from this object
        :return last tailed method
        """
        first, second = tails.split(".", 1)
        if hasattr(obj, first):
            attr = getattr(obj, first)
            if "." in second:
                # BUG FIX: the recursive call previously swapped the
                # arguments (tail_parser(attr, second)), passing the object
                # where the path string belongs and crashing on .split().
                # Recurse with (path, object) in declared order.
                return tail_parser(second, attr)
            else:
                return getattr(attr, second)
    try:
        # Resolve the method once and cache it in self._table keyed by name.
        if name not in self._table:
            if "." not in name:
                for provider in dir(self._gen):
                    provider = getattr(self._gen, provider)
                    if name in dir(provider):
                        self._table[name] = getattr(provider, name)
            else:
                self._table[name] = tail_parser(name, self._gen)
        result = self._table[name](**kwargs)
        if key and callable(key):
            return key(result)
        return result
    except (KeyError, AttributeError, TypeError):
        # Narrowed from a bare ``except Exception`` so that genuine errors
        # raised *inside* a provider method propagate to the caller instead
        # of being masked as UnsupportedField. Lookup failures (missing
        # cache key, missing attribute, calling a None entry) still map to
        # UnsupportedField.
        raise UnsupportedField(name)
|
https://github.com/lk-geimfari/mimesis/issues/619
|
from mimesis.schema import Field, Schema
_ = Field('en')
_('choice', items=[1,2,3])
Traceback (most recent call last):
File "C:\dev\fakemimesis\.pyenv\lib\site-packages\mimesis\schema.py", line 100, in __call__
result = self._table[name](**kwargs)
KeyError: 'choice'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<pyshell#24>", line 1, in <module>
_('choice', items=[1,2,3])
File "C:\dev\fakemimesis\.pyenv\lib\site-packages\mimesis\schema.py", line 105, in __call__
raise UnsupportedField(name)
mimesis.exceptions.UnsupportedField: Field «choice» is not supported.
|
KeyError
|
def tail_parser(tails: str, obj: Any) -> Any:
    """Return method from end of tail.
    :param tails: Tail string
    :param obj: Search tail from this object
    :return last tailed method
    """
    head, tail = tails.split(".", 1)
    # Exactly one "provider.method" level is supported.
    if "." in tail:
        raise UnacceptableField()
    provider = getattr(obj, head)
    if provider is None:
        return None
    return getattr(provider, tail)
|
def tail_parser(tails: str, obj: Any) -> Any:
    """Return method from end of tail.

    Recursively walks a dotted lookup path against *obj*.

    :param tails: Dotted lookup path, e.g. ``"provider.method"``.
    :param obj: Object to resolve the path against.
    :return: The attribute at the end of the path, or ``None`` (implicitly)
        when the first path segment is missing on ``obj``.
    """
    first, second = tails.split(".", 1)
    if hasattr(obj, first):
        attr = getattr(obj, first)
        if "." in second:
            # BUG FIX: the recursive call previously swapped the arguments
            # (tail_parser(attr, second)), passing the object where the
            # path string belongs; the recursion then crashed on .split().
            # Recurse with (path, object) in the declared order.
            return tail_parser(second, attr)
        else:
            return getattr(attr, second)
|
https://github.com/lk-geimfari/mimesis/issues/619
|
from mimesis.schema import Field, Schema
_ = Field('en')
_('choice', items=[1,2,3])
Traceback (most recent call last):
File "C:\dev\fakemimesis\.pyenv\lib\site-packages\mimesis\schema.py", line 100, in __call__
result = self._table[name](**kwargs)
KeyError: 'choice'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<pyshell#24>", line 1, in <module>
_('choice', items=[1,2,3])
File "C:\dev\fakemimesis\.pyenv\lib\site-packages\mimesis\schema.py", line 105, in __call__
raise UnsupportedField(name)
mimesis.exceptions.UnsupportedField: Field «choice» is not supported.
|
KeyError
|
def _bind_to_schema(self, field_name, schema):
    """Bind this field to *schema* and resolve its format string."""
    # Let the base class record the field/schema association first.
    super()._bind_to_schema(field_name, schema)
    # Precedence: an explicitly set format wins, then the root schema's
    # class-level option, then the field's own default.
    fmt = self.format
    if not fmt:
        fmt = getattr(self.root.opts, self.SCHEMA_OPTS_VAR_NAME)
    if not fmt:
        fmt = self.DEFAULT_FORMAT
    self.format = fmt
|
def _bind_to_schema(self, field_name, schema):
    """Bind this field to *schema* and resolve its format string.

    BUG FIX: ``schema`` may be a parent *field* rather than a Schema —
    e.g. ``List(DateTime())`` binds the inner field with the List as
    ``schema`` — and fields have no ``opts`` attribute, causing
    ``AttributeError: 'List' object has no attribute 'opts'``
    (marshmallow issue #1357). Read options from ``self.root``, which
    resolves to the outermost Schema.
    """
    super()._bind_to_schema(field_name, schema)
    self.format = (
        self.format
        or getattr(self.root.opts, self.SCHEMA_OPTS_VAR_NAME)
        or self.DEFAULT_FORMAT
    )
|
https://github.com/marshmallow-code/marshmallow/issues/1357
|
Traceback (most recent call last):
File "test-mm.py", line 8, in <module>
s = MySchema()
File "/Users/victor/.pyenv/versions/marshmallow/lib/python3.6/site-packages/marshmallow/schema.py", line 383, in __init__
self.fields = self._init_fields()
File "/Users/victor/.pyenv/versions/marshmallow/lib/python3.6/site-packages/marshmallow/schema.py", line 913, in _init_fields
self._bind_field(field_name, field_obj)
File "/Users/victor/.pyenv/versions/marshmallow/lib/python3.6/site-packages/marshmallow/schema.py", line 969, in _bind_field
field_obj._bind_to_schema(field_name, self)
File "/Users/victor/.pyenv/versions/marshmallow/lib/python3.6/site-packages/marshmallow/fields.py", line 636, in _bind_to_schema
self.inner._bind_to_schema(field_name, self)
File "/Users/victor/.pyenv/versions/marshmallow/lib/python3.6/site-packages/marshmallow/fields.py", line 1117, in _bind_to_schema
or getattr(schema.opts, self.SCHEMA_OPTS_VAR_NAME)
AttributeError: 'List' object has no attribute 'opts'
|
AttributeError
|
def _invoke_field_validators(self, unmarshal, data, many):
    """Run every ``@validates("<field>")`` method registered on this schema
    against the (partially deserialized) ``data``."""
    for attr_name in self.__processors__[(VALIDATES, False)]:
        validator = getattr(self, attr_name)
        validator_kwargs = validator.__marshmallow_kwargs__[(VALIDATES, False)]
        field_name = validator_kwargs["field_name"]
        try:
            field_obj = self.fields[field_name]
        except KeyError:
            # Declared but currently excluded fields are silently skipped;
            # a validator for a completely unknown field is a usage error.
            if field_name in self.declared_fields:
                continue
            raise ValueError('"{0}" field does not exist.'.format(field_name))
        if many:
            for idx, item in enumerate(data):
                try:
                    value = item[field_obj.attribute or field_name]
                except (KeyError, TypeError):
                    # KeyError: value absent from this item.
                    # TypeError: item is None / not subscriptable (e.g. the
                    # item failed earlier deserialization) — skip it.
                    pass
                else:
                    validated_value = unmarshal.call_and_store(
                        getter_func=validator,
                        data=value,
                        field_name=field_obj.load_from or field_name,
                        field_obj=field_obj,
                        index=(idx if self.opts.index_errors else None),
                    )
                    # `missing` signals the value should be dropped entirely.
                    if validated_value is missing:
                        data[idx].pop(field_name, None)
        else:
            try:
                value = data[field_obj.attribute or field_name]
            except (KeyError, TypeError):
                # Same as above for the single-object case.
                pass
            else:
                validated_value = unmarshal.call_and_store(
                    getter_func=validator,
                    data=value,
                    field_name=field_obj.load_from or field_name,
                    field_obj=field_obj,
                )
                if validated_value is missing:
                    data.pop(field_name, None)
|
def _invoke_field_validators(self, unmarshal, data, many):
    """Run every ``@validates("<field>")`` method registered on this schema
    against the (partially deserialized) ``data``.

    BUG FIX: ``data`` (or an item in it) can be ``None`` when earlier
    deserialization failed, so indexing raised
    ``TypeError: 'NoneType' object is not subscriptable`` (marshmallow
    issue #1342). Both lookups now also catch ``TypeError`` and skip the
    entry, matching the "value absent" path.
    """
    for attr_name in self.__processors__[(VALIDATES, False)]:
        validator = getattr(self, attr_name)
        validator_kwargs = validator.__marshmallow_kwargs__[(VALIDATES, False)]
        field_name = validator_kwargs["field_name"]
        try:
            field_obj = self.fields[field_name]
        except KeyError:
            # Declared but currently excluded fields are silently skipped.
            if field_name in self.declared_fields:
                continue
            raise ValueError('"{0}" field does not exist.'.format(field_name))
        if many:
            for idx, item in enumerate(data):
                try:
                    value = item[field_obj.attribute or field_name]
                except (KeyError, TypeError):
                    # Missing key, or item is None / not subscriptable.
                    pass
                else:
                    validated_value = unmarshal.call_and_store(
                        getter_func=validator,
                        data=value,
                        field_name=field_obj.load_from or field_name,
                        field_obj=field_obj,
                        index=(idx if self.opts.index_errors else None),
                    )
                    # `missing` signals the value should be dropped entirely.
                    if validated_value is missing:
                        data[idx].pop(field_name, None)
        else:
            try:
                value = data[field_obj.attribute or field_name]
            except (KeyError, TypeError):
                pass
            else:
                validated_value = unmarshal.call_and_store(
                    getter_func=validator,
                    data=value,
                    field_name=field_obj.load_from or field_name,
                    field_obj=field_obj,
                )
                if validated_value is missing:
                    data.pop(field_name, None)
|
https://github.com/marshmallow-code/marshmallow/issues/1342
|
Traceback (most recent call last):
File "/_/bug_mschema.py", line 19, in <module>
'bar': 'invalid',
File "/_/env/lib/python3.7/site-packages/marshmallow/schema.py", line 628, in validate
_, errors = self._do_load(data, many, partial=partial, postprocess=False)
File "/_/env/lib/python3.7/site-packages/marshmallow/schema.py", line 670, in _do_load
index_errors=self.opts.index_errors,
File "/_/env/lib/python3.7/site-packages/marshmallow/marshalling.py", line 292, in deserialize
index=(index if index_errors else None)
File "/_/env/lib/python3.7/site-packages/marshmallow/marshalling.py", line 65, in call_and_store
value = getter_func(data)
File "/_/env/lib/python3.7/site-packages/marshmallow/marshalling.py", line 285, in <lambda>
data
File "/_/env/lib/python3.7/site-packages/marshmallow/fields.py", line 265, in deserialize
output = self._deserialize(value, attr, data)
File "/_/env/lib/python3.7/site-packages/marshmallow/fields.py", line 465, in _deserialize
data, errors = self.schema.load(value)
File "/_/env/lib/python3.7/site-packages/marshmallow/schema.py", line 588, in load
result, errors = self._do_load(data, many, partial=partial, postprocess=True)
File "/_/env/lib/python3.7/site-packages/marshmallow/schema.py", line 674, in _do_load
self._invoke_field_validators(unmarshal, data=result, many=many)
File "/_/env/lib/python3.7/site-packages/marshmallow/schema.py", line 894, in _invoke_field_validators
value = data[field_obj.attribute or field_name]
TypeError: 'NoneType' object is not subscriptable
|
TypeError
|
def _deserialize(self, value, attr, data):
    """Parse *value* into a date/time object according to ``self.format``."""
    # Falsy values, e.g. '', None, [] are not valid
    if not value:
        raise self.fail("invalid", obj_type=self.OBJ_TYPE)
    fmt = self.format or self.DEFAULT_FORMAT
    # Named formats (e.g. "iso") map to dedicated parser functions;
    # anything else is treated as a strptime() format string.
    parser = self.DESERIALIZATION_FUNCS.get(fmt)
    if not parser:
        def parser(raw, _fmt=fmt):
            return dt.datetime.strptime(raw, _fmt)
    try:
        return parser(value)
    except (TypeError, AttributeError, ValueError):
        raise self.fail("invalid", obj_type=self.OBJ_TYPE)
|
def _deserialize(self, value, attr, data):
    """Parse *value* into a date/time object according to ``self.format``."""
    # Falsy values, e.g. '', None, [] are not valid
    if not value:
        raise self.fail("invalid", obj_type=self.OBJ_TYPE)
    data_format = self.format or self.DEFAULT_FORMAT
    # Named formats (e.g. "iso") map to dedicated parser functions.
    func = self.DESERIALIZATION_FUNCS.get(data_format)
    if func:
        try:
            return func(value)
        except (TypeError, AttributeError, ValueError):
            raise self.fail("invalid", obj_type=self.OBJ_TYPE)
    elif data_format:
        # Anything else is treated as a strptime() format string.
        try:
            return dt.datetime.strptime(value, data_format)
        except (TypeError, AttributeError, ValueError):
            raise self.fail("invalid", obj_type=self.OBJ_TYPE)
    elif utils.dateutil_available:
        # Last resort: best-effort parsing via python-dateutil.
        try:
            parsed = utils.from_datestring(value)
            return self._create_data_object_from_parsed_value(parsed)
        except (TypeError, ValueError):
            raise self.fail("invalid", obj_type=self.OBJ_TYPE)
    else:
        # No format and no dateutil: nothing we can do but reject.
        warnings.warn(
            "It is recommended that you install python-dateutil "
            "for improved datetime deserialization.",
        )
        raise self.fail("invalid", obj_type=self.OBJ_TYPE)
|
https://github.com/marshmallow-code/marshmallow/issues/758
|
import marshmallow as ma
f = ma.fields.DateTime()
f.deserialize('2018/03/26')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "marshmallow/fields.py", line 261, in deserialize
output = self._deserialize(value, attr, data)
File "marshmallow/fields.py", line 929, in _deserialize
raise self.fail('invalid')
File "marshmallow/fields.py", line 216, in fail
raise ValidationError(msg)
marshmallow.exceptions.ValidationError: Not a valid datetime.
|
marshmallow.exceptions.ValidationError
|
def __filter_fields(self, field_names, obj, many=False):
    """Return only those field_name:field_obj pairs specified by
    ``field_names``.
    :param set field_names: Field names to include in the final
    return dictionary.
    :returns: An dict of field_name:field_obj pairs.
    """
    if obj and many:
        try:  # Homogeneous collection
            # Prefer getitem over iter to prevent breaking serialization
            # of objects for which iter will modify position in the collection
            # e.g. Pymongo cursors
            if hasattr(obj, "__getitem__") and callable(getattr(obj, "__getitem__")):
                try:
                    obj_prototype = obj[0]
                except KeyError:
                    obj_prototype = next(iter(obj))
            else:
                obj_prototype = next(iter(obj))
        except (StopIteration, IndexError):  # Nothing to serialize
            # Empty collection: fall back to the declared fields, but only
            # those that were actually requested.
            return dict(
                (k, v) for k, v in self.declared_fields.items() if k in field_names
            )
        # Use the first item as a prototype for implicit field typing below.
        obj = obj_prototype
    ret = self.dict_class()
    for key in field_names:
        if key in self.declared_fields:
            ret[key] = self.declared_fields[key]
        else:  # Implicit field creation (class Meta 'fields' or 'additional')
            if obj:
                attribute_type = None
                try:
                    # Mappings are indexed; other objects use attribute access.
                    if isinstance(obj, Mapping):
                        attribute_type = type(obj[key])
                    else:
                        attribute_type = type(getattr(obj, key))
                except (AttributeError, KeyError) as err:
                    # Re-raise with the same exception type but a clearer message.
                    err_type = type(err)
                    raise err_type(
                        '"{0}" is not a valid field for {1}.'.format(key, obj)
                    )
                field_obj = self.TYPE_MAPPING.get(attribute_type, fields.Field)()
            else:  # Object is None
                field_obj = fields.Field()
            # map key -> field (default to Raw)
            ret[key] = field_obj
    return ret
|
def __filter_fields(self, field_names, obj, many=False):
    """Return only those field_name:field_obj pairs specified by
    ``field_names``.

    BUG FIX: on an empty collection this returned ``self.declared_fields``
    *unfiltered*, leaking entries not in ``field_names`` (including
    placeholder values) into the schema's fields and later crashing field
    binding (marshmallow issue #948). The early return now restricts the
    result to the requested names.

    :param set field_names: Field names to include in the final
    return dictionary.
    :returns: An dict of field_name:field_obj pairs.
    """
    if obj and many:
        try:  # Homogeneous collection
            # Prefer getitem over iter to prevent breaking serialization
            # of objects for which iter will modify position in the collection
            # e.g. Pymongo cursors
            if hasattr(obj, "__getitem__") and callable(getattr(obj, "__getitem__")):
                try:
                    obj_prototype = obj[0]
                except KeyError:
                    obj_prototype = next(iter(obj))
            else:
                obj_prototype = next(iter(obj))
        except (StopIteration, IndexError):  # Nothing to serialize
            return dict(
                (k, v) for k, v in self.declared_fields.items() if k in field_names
            )
        obj = obj_prototype
    ret = self.dict_class()
    for key in field_names:
        if key in self.declared_fields:
            ret[key] = self.declared_fields[key]
        else:  # Implicit field creation (class Meta 'fields' or 'additional')
            if obj:
                attribute_type = None
                try:
                    if isinstance(obj, Mapping):
                        attribute_type = type(obj[key])
                    else:
                        attribute_type = type(getattr(obj, key))
                except (AttributeError, KeyError) as err:
                    err_type = type(err)
                    raise err_type(
                        '"{0}" is not a valid field for {1}.'.format(key, obj)
                    )
                field_obj = self.TYPE_MAPPING.get(attribute_type, fields.Field)()
            else:  # Object is None
                field_obj = fields.Field()
            # map key -> field (default to Raw)
            ret[key] = field_obj
    return ret
|
https://github.com/marshmallow-code/marshmallow/issues/948
|
Traceback (most recent call last):
File "scratch/missing-field-nested-bug.py", line 72, in <module>
result = HostSchema().dump(host)
File "/Users/kai/.virtualenvs/SPEZZI/lib/python2.7/site-packages/marshmallow/schema.py", line 509, in dump
**kwargs
File "/Users/kai/.virtualenvs/SPEZZI/lib/python2.7/site-packages/marshmallow/marshalling.py", line 138, in serialize
index=(index if index_errors else None)
File "/Users/kai/.virtualenvs/SPEZZI/lib/python2.7/site-packages/marshmallow/marshalling.py", line 62, in call_and_store
value = getter_func(data)
File "/Users/kai/.virtualenvs/SPEZZI/lib/python2.7/site-packages/marshmallow/marshalling.py", line 132, in <lambda>
getter = lambda d: field_obj.serialize(attr_name, d, accessor=accessor)
File "/Users/kai/.virtualenvs/SPEZZI/lib/python2.7/site-packages/marshmallow/fields.py", line 252, in serialize
return self._serialize(value, attr, obj)
File "/Users/kai/.virtualenvs/SPEZZI/lib/python2.7/site-packages/marshmallow/fields.py", line 447, in _serialize
schema._update_fields(obj=nested_obj, many=self.many)
File "/Users/kai/.virtualenvs/SPEZZI/lib/python2.7/site-packages/marshmallow/schema.py", line 767, in _update_fields
self.__set_field_attrs(ret)
File "/Users/kai/.virtualenvs/SPEZZI/lib/python2.7/site-packages/marshmallow/schema.py", line 788, in __set_field_attrs
field_obj._add_to_schema(field_name, self)
AttributeError: 'NoneType' object has no attribute '_add_to_schema'
|
AttributeError
|
def call_and_store(self, getter_func, data, field_name, field_obj, index=None):
    """Call ``getter_func`` with ``data`` as its argument, and store any `ValidationErrors`.
    :param callable getter_func: Function for getting the serialized/deserialized
    value from ``data``.
    :param data: The data passed to ``getter_func``.
    :param str field_name: Field name.
    :param FieldABC field_obj: Field object that performs the
    serialization/deserialization behavior.
    :param int index: Index of the item being validated, if validating a collection,
    otherwise `None`.
    """
    try:
        value = getter_func(data)
    except ValidationError as err:  # Store validation errors
        self.error_fields.append(field_obj)
        self.error_field_names.append(field_name)
        errors = self.get_errors(index=index)
        # Warning: Mutation!
        if isinstance(err.messages, dict):
            # Nested schemas report a dict of messages; store it verbatim.
            errors[field_name] = err.messages
        elif isinstance(errors.get(field_name), dict):
            # A Nested field already stored a dict under this key; file
            # field-level messages under the reserved FIELD key so the two
            # shapes can coexist without clobbering each other.
            errors[field_name].setdefault(FIELD, []).extend(err.messages)
        else:
            errors.setdefault(field_name, []).extend(err.messages)
        # When a Nested field fails validation, the marshalled data is stored
        # on the ValidationError's data attribute
        value = err.data or missing
    return value
|
def call_and_store(self, getter_func, data, field_name, field_obj, index=None):
    """Call ``getter_func`` with ``data`` as its argument, and store any `ValidationErrors`.

    BUG FIX (marshmallow issue #298): when a Nested field had already
    stored a *dict* of messages under ``field_name``, the list-path
    ``errors.setdefault(field_name, []).extend(...)`` raised
    ``AttributeError: 'dict' object has no attribute 'extend'``. Field-level
    messages are now filed under the reserved ``FIELD`` key inside that
    dict instead.

    :param callable getter_func: Function for getting the serialized/deserialized
    value from ``data``.
    :param data: The data passed to ``getter_func``.
    :param str field_name: Field name.
    :param FieldABC field_obj: Field object that performs the
    serialization/deserialization behavior.
    :param int index: Index of the item being validated, if validating a collection,
    otherwise `None`.
    """
    try:
        value = getter_func(data)
    except ValidationError as err:  # Store validation errors
        self.error_fields.append(field_obj)
        self.error_field_names.append(field_name)
        errors = self.get_errors(index=index)
        # Warning: Mutation!
        if isinstance(err.messages, dict):
            errors[field_name] = err.messages
        elif isinstance(errors.get(field_name), dict):
            # Existing dict entry (from a Nested field): append field-level
            # messages under FIELD rather than calling .extend on the dict.
            errors[field_name].setdefault(FIELD, []).extend(err.messages)
        else:
            errors.setdefault(field_name, []).extend(err.messages)
        # When a Nested field fails validation, the marshalled data is stored
        # on the ValidationError's data attribute
        value = err.data or missing
    return value
|
https://github.com/marshmallow-code/marshmallow/issues/298
|
Traceback (most recent call last):
File "/home/andrew/misc/marshmallow/tests/test_schema.py", line 1238, in test_all_errors_on_many_nested_field_with_validates_decorator
_, errors = outer.load({'inner': [{}]})
File "/home/andrew/misc/marshmallow/marshmallow/schema.py", line 539, in load
result, errors = self._do_load(data, many, partial=partial, postprocess=True)
File "/home/andrew/misc/marshmallow/marshmallow/schema.py", line 610, in _do_load
self._invoke_field_validators(data=result, many=many)
File "/home/andrew/misc/marshmallow/marshmallow/schema.py", line 789, in _invoke_field_validators
field_obj=field_obj
File "/home/andrew/misc/marshmallow/marshmallow/marshalling.py", line 74, in call_and_store
errors.setdefault(field_name, []).extend(err.messages)
AttributeError: 'dict' object has no attribute 'extend'
|
AttributeError
|
def get_enrichment_input(self, output_file_path):
    """This function converts modules mode output into input for anvi-script-enrichment-stats
    The input format for anvi-script-enrichment-stats is described in a comment at the top of that script, and here is
    how we get the values for each column:
    The first column, 'KEGG_MODULE', and second column 'accession', are already in the modules mode output as 'module_name'
    and 'kegg_module', respectively.
    The 'N_*' columns are the total number of samples in each group.
    For each module, this function determines which samples the module is 'present' in according to the specified completion threshold.
    This determines the list of samples for the 'sample_ids' column as well as the 'p_*' proportions for each group of samples.
    Finally, the fourth column, 'associated_groups', is computed from the 'p_*' proportions and 'N_*' totals.
    PARAMETERS
    ==========
    output_file_path : str
        a file path where we will store the (temporary) input file for the enrichment script
    """
    filesnpaths.is_output_file_writable(output_file_path)
    # read the files into dataframes
    modules_df = pd.read_csv(self.modules_txt, sep="\t")
    # make sure we have all the columns we need in modules mode output, since this output can be customized
    required_modules_txt_headers = ["kegg_module", "module_completeness", "module_name"]
    missing_headers = []
    for h in required_modules_txt_headers:
        if h not in modules_df.columns:
            missing_headers.append(h)
    if missing_headers:
        missing_string = ", ".join(missing_headers)
        self.progress.reset()
        raise ConfigError(
            "We cannot go on! *dramatic sweep* We trust that you have provided us with "
            "modules mode output, but unfortunately the modules-txt input does not contain "
            f"the following required headers: {missing_string} Please re-generate your "
            "modules-txt to include these before trying again."
        )
    if "unique_id" in modules_df.columns:
        modules_df = modules_df.drop(columns=["unique_id"])
    # samples column sanity check - this column will become the index
    if self.sample_header_in_modules_txt not in modules_df.columns:
        col_list = ", ".join(modules_df.columns)
        self.progress.reset()
        raise ConfigError(
            f"You have specified that your sample names are in the column with header '{self.sample_header_in_modules_txt}' "
            "in the modules-txt file, but that column does not exist. :( Please figure out which column is right and submit "
            "it using the --sample-header parameter. Just so you know, the columns in modules-txt that you can choose from "
            f"are: {col_list}"
        )
    required_groups_txt_headers = ["sample", "group"]
    sample_groups_dict = utils.get_TAB_delimited_file_as_dictionary(
        self.groups_txt, expected_fields=required_groups_txt_headers
    )
    samples_to_groups_dict = {
        samp: sample_groups_dict[samp]["group"] for samp in sample_groups_dict.keys()
    }
    # make sure the samples all have a group
    samples_with_none_group = []
    for s, g in samples_to_groups_dict.items():
        if not g:
            samples_with_none_group.append(s)
            if self.include_ungrouped:
                samples_to_groups_dict[s] = "UNGROUPED"
    if not self.include_ungrouped:
        for s in samples_with_none_group:
            samples_to_groups_dict.pop(s)
    if samples_with_none_group:
        self.progress.reset()
        none_group_str = ", ".join(samples_with_none_group)
        if self.include_ungrouped:
            self.run.warning(
                "Some samples in your groups-txt did not have a group, but since you elected to --include-ungrouped, "
                "we will consider all of those samples to belong to one group called 'UNGROUPED'. Here are those "
                f"UNGROUPED samples: {none_group_str}"
            )
        else:
            self.run.warning(
                "Some samples in your groups-txt did not have a group, and we will ignore those samples. If you "
                "want them to be included in the analysis (but without assigning a group), you can simply re-run "
                "this program with the --include-ungrouped flag. Now. Here are the samples we will be ignoring: "
                f"{none_group_str}"
            )
    # sanity check for mismatch between modules-txt and groups-txt
    sample_names_in_modules_txt = set(
        modules_df[self.sample_header_in_modules_txt].unique()
    )
    sample_names_in_groups_txt = set(sample_groups_dict.keys())
    samples_missing_in_groups_txt = sample_names_in_modules_txt.difference(
        sample_names_in_groups_txt
    )
    samples_missing_in_modules_txt = sample_names_in_groups_txt.difference(
        sample_names_in_modules_txt
    )
    if anvio.DEBUG:
        self.run.info(
            "Samples in modules-txt", ", ".join(list(sample_names_in_modules_txt))
        )
        self.run.info(
            "Samples in groups-txt", ", ".join(list(sample_names_in_groups_txt))
        )
        self.run.info(
            "Missing samples from groups-txt",
            ", ".join(list(samples_missing_in_groups_txt)),
        )
        self.run.info(
            "Missing samples from modules-txt",
            ", ".join(list(samples_missing_in_modules_txt)),
        )
    if samples_missing_in_groups_txt:
        missing_samples_str = ", ".join(samples_missing_in_groups_txt)
        if not self.include_missing:
            self.progress.reset()
            self.run.warning(
                f"Your groups-txt file does not contain some samples present in your modules-txt ({self.sample_header_in_modules_txt} "
                "column). Since you have not elected to --include-samples-missing-from-groups-txt, we are not going to take these samples into consideration at all. "
                "Here are the samples that we will be ignoring: "
                f"{missing_samples_str}"
            )
            # drop the samples that are not in groups-txt
            modules_df = modules_df[
                ~modules_df[self.sample_header_in_modules_txt].isin(
                    list(samples_missing_in_groups_txt)
                )
            ]
            if anvio.DEBUG:
                self.run.info(
                    "Samples remaining in modules-txt dataframe after removing ungrouped",
                    ", ".join(modules_df[self.sample_header_in_modules_txt].unique()),
                )
        else:
            self.progress.reset()
            self.run.warning(
                f"Your groups-txt file does not contain some samples present in your modules-txt ({self.sample_header_in_modules_txt} "
                "column). Since you have chosen to --include-samples-missing-from-groups-txt, for the purposes of this analysis we will now consider all of "
                "these samples to belong to one group called 'UNGROUPED'. If you wish to ignore these samples instead, please run again "
                "without the --include-ungrouped parameter. "
                "Here are the UNGROUPED samples that we will consider as one big happy family: "
                f"{missing_samples_str}"
            )
            # add those samples to the UNGROUPED group
            ungrouped_samples = list(samples_missing_in_groups_txt)
            for s in ungrouped_samples:
                samples_to_groups_dict[s] = "UNGROUPED"
    if samples_missing_in_modules_txt:
        missing_samples_str = ", ".join(samples_missing_in_modules_txt)
        if not self.just_do_it:
            self.progress.reset()
            raise ConfigError(
                f"Your modules-txt file ({self.sample_header_in_modules_txt} column) does not contain some samples that "
                "are present in your groups-txt. This is not necessarily a huge deal, it's just that those samples will "
                "not be included in the enrichment analysis because, well, you don't have any module information for them. "
                "If all of the missing samples belong to groups you don't care about at all, then feel free to ignore this "
                "message and re-run using --just-do-it. But if you do care about those groups, you'd better fix this because "
                "the enrichment results for those groups will be wrong. Here are the samples in question: "
                f"{missing_samples_str}"
            )
        else:
            self.progress.reset()
            self.run.warning(
                f"Your modules-txt file ({self.sample_header_in_modules_txt} column) does not contain some samples that "
                "are present in your groups-txt. This is not necessarily a huge deal, it's just that those samples will "
                "not be included in the enrichment analysis because, well, you don't have any module information for them. "
                "Since you have used the --just-do-it parameter, we assume you don't care about this and are going to keep "
                "going anyway. We hope you know what you are doing :) Here are the samples in question: "
                f"{missing_samples_str}"
            )
            # drop the samples that are not in modules-txt
            for s in list(samples_missing_in_modules_txt):
                samples_to_groups_dict.pop(s)
            if anvio.DEBUG:
                self.run.info(
                    "Samples remaining in groups-txt dataframe after removing ungrouped",
                    ", ".join(samples_to_groups_dict.keys()),
                )
    modules_df.set_index(self.sample_header_in_modules_txt, inplace=True)
    sample_groups_df = pd.DataFrame.from_dict(
        samples_to_groups_dict, orient="index", columns=["group"]
    )
    # convert modules mode output to enrichment input
    N_values = sample_groups_df["group"].value_counts()
    group_list = N_values.keys()
    module_list = modules_df["kegg_module"].unique()
    output_dict = {}
    header_list = ["KEGG_MODULE", "accession", "sample_ids", "associated_groups"]
    for c in group_list:
        header_list.append(f"p_{c}")
        header_list.append(f"N_{c}")
    for mod_num in module_list:
        query_string = f"kegg_module == '{mod_num}' and module_completeness >= {self.module_completion_threshold}"
        samples_with_mod_df = modules_df.query(query_string)
        if samples_with_mod_df.shape[0] == 0:
            continue
        # if we are working with module data from metagenomes, we may have multiple complete copies of the module in
        # the same sample. We drop these duplicates before proceeding.
        duplicates = samples_with_mod_df.index.duplicated()
        samples_with_mod_df = samples_with_mod_df[~duplicates]
        # BUG FIX: the index here holds sample names, so `["module_name"][0]`
        # was a *label* lookup that only worked through pandas' deprecated
        # integer-fallback behavior (removed in modern pandas). Use .iloc[0]
        # to take the first row positionally.
        mod_name = samples_with_mod_df["module_name"].iloc[0]
        output_dict[mod_name] = {}
        output_dict[mod_name]["KEGG_MODULE"] = mod_name
        output_dict[mod_name]["accession"] = mod_num
        samples_with_mod_list = list(samples_with_mod_df.index)
        output_dict[mod_name]["sample_ids"] = ",".join(samples_with_mod_list)
        sample_group_subset = sample_groups_df.loc[samples_with_mod_list]
        p_values = sample_group_subset["group"].value_counts()
        # we need the categories p and N values to be in the same order for finding associated groups
        p_vector = np.array([])
        N_vector = np.array([])
        for c in group_list:
            if c not in p_values.index:
                p_values[c] = 0
            p_vector = np.append(p_vector, p_values[c] / N_values[c])
            N_vector = np.append(N_vector, N_values[c])
        # compute associated groups for functional enrichment
        enriched_groups_vector = utils.get_enriched_groups(p_vector, N_vector)
        associated_groups = [
            c for i, c in enumerate(group_list) if enriched_groups_vector[i]
        ]
        output_dict[mod_name]["associated_groups"] = ",".join(associated_groups)
        for c in group_list:
            output_dict[mod_name]["p_%s" % c] = p_values[c] / N_values[c]
            output_dict[mod_name]["N_%s" % c] = N_values[c]
    utils.store_dict_as_TAB_delimited_file(
        output_dict, output_file_path, key_header="accession", headers=header_list
    )
|
def get_enrichment_input(self, output_file_path):
    """This function converts modules mode output into input for anvi-script-enrichment-stats

    The input format for anvi-script-enrichment-stats is described in a comment at the top of that script, and here is
    how we get the values for each column:
    The first column, 'KEGG_MODULE', and second column 'accession', are already in the modules mode output as 'module_name'
    and 'kegg_module', respectively.
    The 'N_*' columns are the total number of samples in each group.
    For each module, this function determines which samples the module is 'present' in according to the specified completion threshold.
    This determines the list of samples for the 'sample_ids' column as well as the 'p_*' proportions for each group of samples.
    Finally, the fourth column, 'associated_groups', is computed from the 'p_*' proportions and 'N_*' totals.

    PARAMETERS
    ==========
    output_file_path : str
        a file path where we will store the (temporary) input file for the enrichment script

    RAISES
    ======
    ConfigError
        if required headers are missing from modules-txt, if the sample column cannot be
        found, or if groups-txt and modules-txt disagree on sample names in a way the user
        has not explicitly asked us to tolerate
    """
    filesnpaths.is_output_file_writable(output_file_path)

    # read the files into dataframes
    modules_df = pd.read_csv(self.modules_txt, sep="\t")

    # make sure we have all the columns we need in modules mode output, since this output can be customized
    required_modules_txt_headers = ["kegg_module", "module_completeness", "module_name"]
    missing_headers = []
    for h in required_modules_txt_headers:
        if h not in modules_df.columns:
            missing_headers.append(h)
    if missing_headers:
        missing_string = ", ".join(missing_headers)
        self.progress.reset()
        raise ConfigError(
            "We cannot go on! *dramatic sweep* We trust that you have provided us with "
            "modules mode output, but unfortunately the modules-txt input does not contain "
            f"the following required headers: {missing_string} Please re-generate your "
            "modules-txt to include these before trying again."
        )
    if "unique_id" in modules_df.columns:
        modules_df = modules_df.drop(columns=["unique_id"])

    # samples column sanity check - this column will become the index
    if self.sample_header_in_modules_txt not in modules_df.columns:
        col_list = ", ".join(modules_df.columns)
        self.progress.reset()
        raise ConfigError(
            f"You have specified that your sample names are in the column with header '{self.sample_header_in_modules_txt}' "
            "in the modules-txt file, but that column does not exist. :( Please figure out which column is right and submit "
            "it using the --sample-header parameter. Just so you know, the columns in modules-txt that you can choose from "
            f"are: {col_list}"
        )

    required_groups_txt_headers = ["sample", "group"]
    sample_groups_dict = utils.get_TAB_delimited_file_as_dictionary(
        self.groups_txt, expected_fields=required_groups_txt_headers
    )
    samples_to_groups_dict = {
        samp: sample_groups_dict[samp]["group"] for samp in sample_groups_dict.keys()
    }

    # make sure the samples all have a group
    samples_with_none_group = []
    for s, g in samples_to_groups_dict.items():
        if not g:
            samples_with_none_group.append(s)
            if self.include_ungrouped:
                samples_to_groups_dict[s] = "UNGROUPED"
    if not self.include_ungrouped:
        for s in samples_with_none_group:
            samples_to_groups_dict.pop(s)
    if samples_with_none_group:
        self.progress.reset()
        none_group_str = ", ".join(samples_with_none_group)
        if self.include_ungrouped:
            self.run.warning(
                "Some samples in your groups-txt did not have a group, but since you elected to --include-ungrouped, "
                "we will consider all of those samples to belong to one group called 'UNGROUPED'. Here are those "
                f"UNGROUPED samples: {none_group_str}"
            )
        else:
            self.run.warning(
                "Some samples in your groups-txt did not have a group, and we will ignore those samples. If you "
                "want them to be included in the analysis (but without assigning a group), you can simply re-run "
                "this program with the --include-ungrouped flag. Now. Here are the samples we will be ignoring: "
                f"{none_group_str}"
            )

    # sanity check for mismatch between modules-txt and groups-txt
    sample_names_in_modules_txt = set(
        modules_df[self.sample_header_in_modules_txt].unique()
    )
    sample_names_in_groups_txt = set(sample_groups_dict.keys())
    samples_missing_in_groups_txt = sample_names_in_modules_txt.difference(
        sample_names_in_groups_txt
    )
    samples_missing_in_modules_txt = sample_names_in_groups_txt.difference(
        sample_names_in_modules_txt
    )
    if anvio.DEBUG:
        self.run.info(
            "Samples in modules-txt", ", ".join(list(sample_names_in_modules_txt))
        )
        self.run.info(
            "Samples in groups-txt", ", ".join(list(sample_names_in_groups_txt))
        )
        self.run.info(
            "Missing samples from groups-txt",
            ", ".join(list(samples_missing_in_groups_txt)),
        )
        self.run.info(
            "Missing samples from modules-txt",
            ", ".join(list(samples_missing_in_modules_txt)),
        )

    if samples_missing_in_groups_txt:
        missing_samples_str = ", ".join(samples_missing_in_groups_txt)
        if not self.include_missing:
            self.progress.reset()
            self.run.warning(
                f"Your groups-txt file does not contain some samples present in your modules-txt ({self.sample_header_in_modules_txt} "
                "column). Since you have not elected to --include-samples-missing-from-groups-txt, we are not going to take these samples into consideration at all. "
                "Here are the samples that we will be ignoring: "
                f"{missing_samples_str}"
            )
            # drop the samples that are not in groups-txt
            modules_df = modules_df[
                ~modules_df[self.sample_header_in_modules_txt].isin(
                    list(samples_missing_in_groups_txt)
                )
            ]
            if anvio.DEBUG:
                self.run.info(
                    "Samples remaining in modules-txt dataframe after removing ungrouped",
                    ", ".join(modules_df[self.sample_header_in_modules_txt].unique()),
                )
        else:
            self.progress.reset()
            self.run.warning(
                f"Your groups-txt file does not contain some samples present in your modules-txt ({self.sample_header_in_modules_txt} "
                "column). Since you have chosen to --include-samples-missing-from-groups-txt, for the purposes of this analysis we will now consider all of "
                "these samples to belong to one group called 'UNGROUPED'. If you wish to ignore these samples instead, please run again "
                "without the --include-ungrouped parameter. "
                "Here are the UNGROUPED samples that we will consider as one big happy family: "
                f"{missing_samples_str}"
            )
            # add those samples to the UNGROUPED group
            ungrouped_samples = list(samples_missing_in_groups_txt)
            for s in ungrouped_samples:
                samples_to_groups_dict[s] = "UNGROUPED"

    if samples_missing_in_modules_txt:
        missing_samples_str = ", ".join(samples_missing_in_modules_txt)
        if not self.just_do_it:
            self.progress.reset()
            raise ConfigError(
                f"Your modules-txt file ({self.sample_header_in_modules_txt} column) does not contain some samples that "
                "are present in your groups-txt. This is not necessarily a huge deal, it's just that those samples will "
                "not be included in the enrichment analysis because, well, you don't have any module information for them. "
                "If all of the missing samples belong to groups you don't care about at all, then feel free to ignore this "
                "message and re-run using --just-do-it. But if you do care about those groups, you'd better fix this because "
                "the enrichment results for those groups will be wrong. Here are the samples in question: "
                f"{missing_samples_str}"
            )
        else:
            self.progress.reset()
            self.run.warning(
                f"Your modules-txt file ({self.sample_header_in_modules_txt} column) does not contain some samples that "
                "are present in your groups-txt. This is not necessarily a huge deal, it's just that those samples will "
                "not be included in the enrichment analysis because, well, you don't have any module information for them. "
                "Since you have used the --just-do-it parameter, we assume you don't care about this and are going to keep "
                "going anyway. We hope you know what you are doing :) Here are the samples in question: "
                f"{missing_samples_str}"
            )
            # drop the samples that are not in modules-txt
            for s in list(samples_missing_in_modules_txt):
                samples_to_groups_dict.pop(s)
            if anvio.DEBUG:
                self.run.info(
                    "Samples remaining in groups-txt dataframe after removing ungrouped",
                    ", ".join(samples_to_groups_dict.keys()),
                )

    modules_df.set_index(self.sample_header_in_modules_txt, inplace=True)
    sample_groups_df = pd.DataFrame.from_dict(
        samples_to_groups_dict, orient="index", columns=["group"]
    )

    # convert modules mode output to enrichment input
    N_values = sample_groups_df["group"].value_counts()
    group_list = N_values.keys()
    module_list = modules_df["kegg_module"].unique()
    output_dict = {}
    header_list = ["KEGG_MODULE", "accession", "sample_ids", "associated_groups"]
    for c in group_list:
        header_list.append(f"p_{c}")
        header_list.append(f"N_{c}")
    for mod_num in module_list:
        query_string = f"kegg_module == '{mod_num}' and module_completeness >= {self.module_completion_threshold}"
        samples_with_mod_df = modules_df.query(query_string)
        if samples_with_mod_df.shape[0] == 0:
            continue
        # if we are working with module data from metagenomes, we may have multiple complete copies of the module in
        # the same sample. We drop these duplicates before proceeding, otherwise positional access into the
        # 'module_name' column below can fail with an IndexError on the duplicated index
        # (see https://github.com/merenlab/anvio/issues/1654).
        duplicates = samples_with_mod_df.index.duplicated()
        samples_with_mod_df = samples_with_mod_df[~duplicates]
        mod_name = samples_with_mod_df["module_name"][0]
        output_dict[mod_name] = {}
        output_dict[mod_name]["KEGG_MODULE"] = mod_name
        output_dict[mod_name]["accession"] = mod_num
        samples_with_mod_list = list(samples_with_mod_df.index)
        output_dict[mod_name]["sample_ids"] = ",".join(samples_with_mod_list)
        sample_group_subset = sample_groups_df.loc[samples_with_mod_list]
        p_values = sample_group_subset["group"].value_counts()
        # we need the categories p and N values to be in the same order for finding associated groups
        p_vector = np.array([])
        N_vector = np.array([])
        for c in group_list:
            if c not in p_values.index:
                p_values[c] = 0
            p_vector = np.append(p_vector, p_values[c] / N_values[c])
            N_vector = np.append(N_vector, N_values[c])

        # compute associated groups for functional enrichment
        enriched_groups_vector = utils.get_enriched_groups(p_vector, N_vector)
        associated_groups = [
            c for i, c in enumerate(group_list) if enriched_groups_vector[i]
        ]
        output_dict[mod_name]["associated_groups"] = ",".join(associated_groups)
        for c in group_list:
            output_dict[mod_name]["p_%s" % c] = p_values[c] / N_values[c]
            output_dict[mod_name]["N_%s" % c] = N_values[c]

    utils.store_dict_as_TAB_delimited_file(
        output_dict, output_file_path, key_header="accession", headers=header_list
    )
|
https://github.com/merenlab/anvio/issues/1654
|
[27 Jan 21 17:31:49 Enrichment analysis] Converting modules mode output into input for enrichment script Traceback (most recent call last):
File "/Users/iva/opt/miniconda3/envs/anvio-dev/lib/python3.6/site-packages/pandas/core/indexes/base.py", line 4730, in get_value
return self._engine.get_value(s, k, tz=getattr(series.dtype, "tz", None))
File "pandas/_libs/index.pyx", line 80, in pandas._libs.index.IndexEngine.get_value
File "pandas/_libs/index.pyx", line 88, in pandas._libs.index.IndexEngine.get_value
File "pandas/_libs/index.pyx", line 126, in pandas._libs.index.IndexEngine.get_loc
File "pandas/_libs/index.pyx", line 141, in pandas._libs.index.IndexEngine._get_loc_duplicates
TypeError: '<' not supported between instances of 'str' and 'int'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/iva/software/anvio/bin/anvi-compute-functional-enrichment", line 128, in <module>
main(args)
File "/Users/iva/software/anvio/bin/anvi-compute-functional-enrichment", line 58, in main
e.run_enrichment_stats()
File "/Users/iva/software/anvio/anvio/kegg.py", line 4608, in run_enrichment_stats
self.get_enrichment_input(enrichment_input_path)
File "/Users/iva/software/anvio/anvio/kegg.py", line 4567, in get_enrichment_input
mod_name = samples_with_mod_df['module_name'][0]
File "/Users/iva/opt/miniconda3/envs/anvio-dev/lib/python3.6/site-packages/pandas/core/series.py", line 1068, in __getitem__
result = self.index.get_value(self, key)
File "/Users/iva/opt/miniconda3/envs/anvio-dev/lib/python3.6/site-packages/pandas/core/indexes/base.py", line 4750, in get_value
raise IndexError(key)
IndexError: 0
|
TypeError
|
def store_genes_basic_info(self):
    """Store per-gene basic information for this bin as a 'gene_calls.txt' output file.

    For every gene caller id in the bin, the output includes the gene's coordinates
    (contig, start, stop, direction), its functional annotations from every available
    source (plus accession), the DNA sequence, and (optionally) the translated amino
    acid sequence. Does nothing when the summary is in 'quick' mode.

    RAISES
    ======
    ConfigError
        if a gene caller id in the splits table is missing from the genes-in-contigs
        table (see https://github.com/merenlab/anvio/issues/1596)
    """
    if self.summary.quick:
        return

    self.progress.update("Sorting out gene calls ...")
    d = {}
    headers = ["contig", "start", "stop", "direction"]
    header_items_for_gene_sequences = ["dna_sequence"]
    if self.summary.report_aa_seqs_for_gene_calls:
        header_items_for_gene_sequences.append("aa_sequence")

    for gene_callers_id in self.gene_caller_ids:
        d[gene_callers_id] = {}
        # add sample independent information into `d`;
        for header in headers:
            if gene_callers_id not in self.summary.genes_in_contigs_dict:
                # NOTE: was `progress.reset()` (module-level object); use the instance's
                # progress for consistency with the rest of this method.
                self.progress.reset()
                raise ConfigError(
                    "Bad news :( A very rare error has occurred. A gene caller id found in your splits table "
                    "is not occurring in the genes in contigs table of your contigs database. To fix this "
                    'error, please go here & search for "Meren\'s Solution": '
                    "https://github.com/merenlab/anvio/issues/1596. It would be extremely "
                    "helpful if you add a comment to that thread to describe the journey of your databases "
                    "so perhaps we can finally recapitulate this error and fix it once and for all. Apologies "
                    "for this, and thank you for your patience."
                )
            d[gene_callers_id][header] = self.summary.genes_in_contigs_dict[
                gene_callers_id
            ][header]
        self.progress.update("Sorting out functions ...")
        # add functions if there are any:
        if len(self.summary.gene_function_call_sources):
            for source in self.summary.gene_function_call_sources:
                if gene_callers_id not in self.summary.gene_function_calls_dict:
                    # this gene did not get any functional annotation
                    d[gene_callers_id][source] = ""
                    d[gene_callers_id][source + " (ACCESSION)"] = ""
                    continue
                if self.summary.gene_function_calls_dict[gene_callers_id][source]:
                    d[gene_callers_id][source + " (ACCESSION)"] = (
                        self.summary.gene_function_calls_dict[gene_callers_id][source][
                            0
                        ]
                    )
                    d[gene_callers_id][source] = self.summary.gene_function_calls_dict[
                        gene_callers_id
                    ][source][1]
                else:
                    d[gene_callers_id][source + " (ACCESSION)"] = ""
                    d[gene_callers_id][source] = ""
        # finally add the dna and amino acid sequence for gene calls:
        contig = self.summary.genes_in_contigs_dict[gene_callers_id]["contig"]
        start = self.summary.genes_in_contigs_dict[gene_callers_id]["start"]
        stop = self.summary.genes_in_contigs_dict[gene_callers_id]["stop"]
        dna_sequence = self.summary.contig_sequences[contig]["sequence"][start:stop]
        if self.summary.genes_in_contigs_dict[gene_callers_id]["direction"] == "r":
            dna_sequence = utils.rev_comp(dna_sequence)
        d[gene_callers_id]["dna_sequence"] = dna_sequence
        # if the user asked for it, report amino acid sequences as well
        if self.summary.report_aa_seqs_for_gene_calls:
            try:
                d[gene_callers_id]["aa_sequence"] = (
                    utils.get_translated_sequence_for_gene_call(
                        dna_sequence, gene_callers_id
                    )
                )
            except:
                # best-effort: genes that cannot be translated get an empty field
                d[gene_callers_id]["aa_sequence"] = ""

    output_file_obj = self.get_output_file_handle("gene_calls.txt")
    if self.summary.gene_function_call_sources:
        # each function source contributes two columns: annotation and accession
        sources = [
            [source, source + " (ACCESSION)"]
            for source in self.summary.gene_function_call_sources
        ]
        headers = (
            ["gene_callers_id"]
            + headers
            + [item for sublist in sources for item in sublist]
            + header_items_for_gene_sequences
        )
    else:
        headers = ["gene_callers_id"] + headers + header_items_for_gene_sequences

    if self.summary.reformat_contig_names:
        # swap in user-facing (reformatted) contig names before reporting
        for gene_callers_id in d:
            reformatted_contig_name = self.contig_name_conversion_dict[
                d[gene_callers_id]["contig"]
            ]["reformatted_contig_name"]
            d[gene_callers_id]["contig"] = reformatted_contig_name

    self.progress.update("Storing genes basic info ...")
    utils.store_dict_as_TAB_delimited_file(
        d, None, headers=headers, file_obj=output_file_obj
    )

    self.bin_info_dict["genes"] = {"num_genes_found": len(self.gene_caller_ids)}
|
def store_genes_basic_info(self):
    """Store per-gene basic information for this bin as a 'gene_calls.txt' output file.

    For every gene caller id in the bin, the output includes the gene's coordinates
    (contig, start, stop, direction), its functional annotations from every available
    source (plus accession), the DNA sequence, and (optionally) the translated amino
    acid sequence. Does nothing when the summary is in 'quick' mode.

    RAISES
    ======
    ConfigError
        if a gene caller id in the splits table is missing from the genes-in-contigs
        table (see https://github.com/merenlab/anvio/issues/1596)
    """
    if self.summary.quick:
        return

    self.progress.update("Sorting out gene calls ...")
    d = {}
    headers = ["contig", "start", "stop", "direction"]
    header_items_for_gene_sequences = ["dna_sequence"]
    if self.summary.report_aa_seqs_for_gene_calls:
        header_items_for_gene_sequences.append("aa_sequence")

    for gene_callers_id in self.gene_caller_ids:
        d[gene_callers_id] = {}
        # add sample independent information into `d`;
        for header in headers:
            # guard against the rare case where a gene caller id from the splits table
            # is absent from genes_in_contigs_dict -- previously this was an opaque
            # KeyError (https://github.com/merenlab/anvio/issues/1596)
            if gene_callers_id not in self.summary.genes_in_contigs_dict:
                self.progress.reset()
                raise ConfigError(
                    "Bad news :( A very rare error has occurred. A gene caller id found in your splits table "
                    "is not occurring in the genes in contigs table of your contigs database. To fix this "
                    'error, please go here & search for "Meren\'s Solution": '
                    "https://github.com/merenlab/anvio/issues/1596. It would be extremely "
                    "helpful if you add a comment to that thread to describe the journey of your databases "
                    "so perhaps we can finally recapitulate this error and fix it once and for all. Apologies "
                    "for this, and thank you for your patience."
                )
            d[gene_callers_id][header] = self.summary.genes_in_contigs_dict[
                gene_callers_id
            ][header]
        self.progress.update("Sorting out functions ...")
        # add functions if there are any:
        if len(self.summary.gene_function_call_sources):
            for source in self.summary.gene_function_call_sources:
                if gene_callers_id not in self.summary.gene_function_calls_dict:
                    # this gene did not get any functional annotation
                    d[gene_callers_id][source] = ""
                    d[gene_callers_id][source + " (ACCESSION)"] = ""
                    continue
                if self.summary.gene_function_calls_dict[gene_callers_id][source]:
                    d[gene_callers_id][source + " (ACCESSION)"] = (
                        self.summary.gene_function_calls_dict[gene_callers_id][source][
                            0
                        ]
                    )
                    d[gene_callers_id][source] = self.summary.gene_function_calls_dict[
                        gene_callers_id
                    ][source][1]
                else:
                    d[gene_callers_id][source + " (ACCESSION)"] = ""
                    d[gene_callers_id][source] = ""
        # finally add the dna and amino acid sequence for gene calls:
        contig = self.summary.genes_in_contigs_dict[gene_callers_id]["contig"]
        start = self.summary.genes_in_contigs_dict[gene_callers_id]["start"]
        stop = self.summary.genes_in_contigs_dict[gene_callers_id]["stop"]
        dna_sequence = self.summary.contig_sequences[contig]["sequence"][start:stop]
        if self.summary.genes_in_contigs_dict[gene_callers_id]["direction"] == "r":
            dna_sequence = utils.rev_comp(dna_sequence)
        d[gene_callers_id]["dna_sequence"] = dna_sequence
        # if the user asked for it, report amino acid sequences as well
        if self.summary.report_aa_seqs_for_gene_calls:
            try:
                d[gene_callers_id]["aa_sequence"] = (
                    utils.get_translated_sequence_for_gene_call(
                        dna_sequence, gene_callers_id
                    )
                )
            except:
                # best-effort: genes that cannot be translated get an empty field
                d[gene_callers_id]["aa_sequence"] = ""

    output_file_obj = self.get_output_file_handle("gene_calls.txt")
    if self.summary.gene_function_call_sources:
        # each function source contributes two columns: annotation and accession
        sources = [
            [source, source + " (ACCESSION)"]
            for source in self.summary.gene_function_call_sources
        ]
        headers = (
            ["gene_callers_id"]
            + headers
            + [item for sublist in sources for item in sublist]
            + header_items_for_gene_sequences
        )
    else:
        headers = ["gene_callers_id"] + headers + header_items_for_gene_sequences

    if self.summary.reformat_contig_names:
        # swap in user-facing (reformatted) contig names before reporting
        for gene_callers_id in d:
            reformatted_contig_name = self.contig_name_conversion_dict[
                d[gene_callers_id]["contig"]
            ]["reformatted_contig_name"]
            d[gene_callers_id]["contig"] = reformatted_contig_name

    self.progress.update("Storing genes basic info ...")
    utils.store_dict_as_TAB_delimited_file(
        d, None, headers=headers, file_obj=output_file_obj
    )

    self.bin_info_dict["genes"] = {"num_genes_found": len(self.gene_caller_ids)}
|
https://github.com/merenlab/anvio/issues/1596
|
[14 Dec 20 13:41:59 Summarizing 1 of 187: 'Bin_36'] Sorting out functions ... ETA: 0sTraceback (most recent call last):
File "/home/jgs286/git_sw/anvio/bin/anvi-summarize", line 123, in <module>
main(args)
File "/home/jgs286/git_sw/anvio/bin/anvi-summarize", line 69, in main
summary.process()
File "/home/jgs286/git_sw/anvio/anvio/summarizer.py", line 889, in process
self.summary['collection'][bin_id] = bin.create()
File "/home/jgs286/git_sw/anvio/anvio/summarizer.py", line 1326, in create
self.store_genes_basic_info()
File "/home/jgs286/git_sw/anvio/anvio/summarizer.py", line 1513, in store_genes_basic_info
d[gene_callers_id][header] = self.summary.genes_in_contigs_dict[gene_callers_id][header]
KeyError: 1147349
|
KeyError
|
def migrate(db_path):
    """Upgrade a contigs database from `current_version` to `next_version`.

    Adds an (empty) tRNA taxonomy table plus its metadata keys, and purges any
    pre-existing tRNA HMM hits, since earlier versions stored them with
    incorrect anticodon information and they must be re-generated.
    """
    if db_path is None:
        raise ConfigError("No database path is given.")

    utils.is_contigs_db(db_path)

    database = db.DB(db_path, None, ignore_version=True)
    if str(database.get_version()) != current_version:
        raise ConfigError(
            "Version of this contigs database is not %s (hence, this script cannot really do anything)."
            % current_version
        )

    progress.new("Le migrateaoux")
    progress.update("Creating a new table for tRNA taxonomy")

    # best-effort cleanup of leftovers from a previous (possibly failed) attempt
    try:
        database.drop_table(trna_taxonomy_table_name)
    except:
        pass

    try:
        database.remove_meta_key_value_pair("trna_taxonomy_was_run")
        database.remove_meta_key_value_pair("trna_taxonomy_database_version")
    except:
        pass

    database.set_meta_value("trna_taxonomy_was_run", False)
    database.set_meta_value("trna_taxonomy_database_version", None)
    database.create_table(
        trna_taxonomy_table_name,
        trna_taxonomy_table_structure,
        trna_taxonomy_table_types,
    )

    progress.update("Removing tRNA hits")
    hit_rows = database._exec(
        "select gene_callers_id from hmm_hits where source='Transfer_RNAs'"
    ).fetchall()
    relevant_gene_calls = [row[0] for row in hit_rows]
    if len(relevant_gene_calls):
        # these tables carry a 'source' column, so we can delete by source directly
        for table_name in (
            "hmm_hits_info",
            "hmm_hits",
            "hmm_hits_in_splits",
            "genes_in_contigs",
            "gene_functions",
        ):
            database.remove_some_rows_from_table(
                table_name, 'source IN ("Transfer_RNAs")'
            )

        # the remaining tables are keyed by gene caller id
        CLAUSE = "gene_callers_id in (%s)" % (",".join(map(str, relevant_gene_calls)))
        for table_name in (
            "gene_amino_acid_sequences",
            "genes_taxonomy",
            "genes_in_splits",
        ):
            database.remove_some_rows_from_table(table_name, CLAUSE)

        # drop 'Transfer_RNAs' from the list of known function sources, too
        gene_function_sources = database.get_meta_value("gene_function_sources")
        if len(gene_function_sources):
            remaining_sources = [
                f for f in gene_function_sources.split(",") if f != "Transfer_RNAs"
            ]
            database.remove_meta_key_value_pair("gene_function_sources")
            database.set_meta_value("gene_function_sources", ",".join(remaining_sources))

    progress.update("Updating version")
    database.remove_meta_key_value_pair("version")
    database.set_version(next_version)

    progress.update("Committing changes")
    database.disconnect()

    progress.end()

    message = (
        f"The contigs database is now {next_version}. This upgrade added an empty tRNA taxonomy table "
        f"in it. Probably you will never use it because who cares about tRNA taxonomy amirite? "
        f"Well, IT WILL FOREVER BE THERE ANYWAY. BOOM."
    )
    if relevant_gene_calls:
        message += (
            " This update also removed tRNA HMM hits from your contis database :/ It was really very "
            "necessary since one of the developers of anvi'o (*COUGH* meren *COUGH*) was very confused "
            "about anticodons. Now everything is fixed, but unfortunately you will have to re-run "
            "`anvi-scan-trnas` program on your contigs database :( "
        )
    run.info_single(message, nl_after=1, nl_before=1, mc="green")
|
def migrate(db_path):
    """Upgrade a contigs database from `current_version` to `next_version`.

    Adds an (empty) tRNA taxonomy table plus its metadata keys, and purges any
    pre-existing tRNA HMM hits, since earlier versions stored them with
    incorrect anticodon information and they must be re-generated.
    """
    if db_path is None:
        raise ConfigError("No database path is given.")

    utils.is_contigs_db(db_path)

    contigs_db = db.DB(db_path, None, ignore_version=True)
    if str(contigs_db.get_version()) != current_version:
        raise ConfigError(
            "Version of this contigs database is not %s (hence, this script cannot really do anything)."
            % current_version
        )

    progress.new("Le migrateaoux")
    progress.update("Creating a new table for tRNA taxonomy")

    # just to be on the safe side: best-effort cleanup of leftovers from a
    # previous (possibly failed) migration attempt.
    try:
        contigs_db.drop_table(trna_taxonomy_table_name)
    except:
        pass

    try:
        contigs_db.remove_meta_key_value_pair("trna_taxonomy_was_run")
        contigs_db.remove_meta_key_value_pair("trna_taxonomy_database_version")
    except:
        pass

    contigs_db.set_meta_value("trna_taxonomy_was_run", False)
    contigs_db.set_meta_value("trna_taxonomy_database_version", None)
    contigs_db.create_table(
        trna_taxonomy_table_name,
        trna_taxonomy_table_structure,
        trna_taxonomy_table_types,
    )

    progress.update("Removing tRNA hits")
    # FIX: we must collect *gene caller ids* here, not `entry_id`s. The ids
    # gathered in this list are interpolated into a `gene_callers_id in (...)`
    # clause below; selecting entry_id left unrelated rows deleted and real
    # tRNA hits behind (see https://github.com/merenlab/anvio/issues/1593).
    relevant_gene_calls = [
        g[0]
        for g in contigs_db._exec(
            "select gene_callers_id from hmm_hits where source='Transfer_RNAs'"
        ).fetchall()
    ]
    if len(relevant_gene_calls):
        # these tables carry a 'source' column, so we can delete by source directly
        for table_name in [
            "hmm_hits_info",
            "hmm_hits",
            "hmm_hits_in_splits",
            "genes_in_contigs",
            "gene_functions",
        ]:
            contigs_db.remove_some_rows_from_table(
                table_name, 'source IN ("Transfer_RNAs")'
            )

        # the remaining tables are keyed by gene caller id
        CLAUSE = "gene_callers_id in (%s)" % (
            ",".join([str(x) for x in relevant_gene_calls])
        )
        for table_name in [
            "gene_amino_acid_sequences",
            "genes_taxonomy",
            "genes_in_splits",
        ]:
            contigs_db.remove_some_rows_from_table(table_name, CLAUSE)

        # drop 'Transfer_RNAs' from the list of known function sources, too
        gene_function_sources = contigs_db.get_meta_value("gene_function_sources")
        if len(gene_function_sources):
            new_gene_function_sources = ",".join(
                [f for f in gene_function_sources.split(",") if f != "Transfer_RNAs"]
            )
            contigs_db.remove_meta_key_value_pair("gene_function_sources")
            contigs_db.set_meta_value(
                "gene_function_sources", new_gene_function_sources
            )

    progress.update("Updating version")
    contigs_db.remove_meta_key_value_pair("version")
    contigs_db.set_version(next_version)

    progress.update("Committing changes")
    contigs_db.disconnect()

    progress.end()

    message = (
        f"The contigs database is now {next_version}. This upgrade added an empty tRNA taxonomy table "
        f"in it. Probably you will never use it because who cares about tRNA taxonomy amirite? "
        f"Well, IT WILL FOREVER BE THERE ANYWAY. BOOM."
    )
    if relevant_gene_calls:
        message += (
            " This update also removed tRNA HMM hits from your contis database :/ It was really very "
            "necessary since one of the developers of anvi'o (*COUGH* meren *COUGH*) was very confused "
            "about anticodons. Now everything is fixed, but unfortunately you will have to re-run "
            "`anvi-scan-trnas` program on your contigs database :( "
        )
    run.info_single(message, nl_after=1, nl_before=1, mc="green")
|
https://github.com/merenlab/anvio/issues/1596
|
[14 Dec 20 13:41:59 Summarizing 1 of 187: 'Bin_36'] Sorting out functions ... ETA: 0sTraceback (most recent call last):
File "/home/jgs286/git_sw/anvio/bin/anvi-summarize", line 123, in <module>
main(args)
File "/home/jgs286/git_sw/anvio/bin/anvi-summarize", line 69, in main
summary.process()
File "/home/jgs286/git_sw/anvio/anvio/summarizer.py", line 889, in process
self.summary['collection'][bin_id] = bin.create()
File "/home/jgs286/git_sw/anvio/anvio/summarizer.py", line 1326, in create
self.store_genes_basic_info()
File "/home/jgs286/git_sw/anvio/anvio/summarizer.py", line 1513, in store_genes_basic_info
d[gene_callers_id][header] = self.summary.genes_in_contigs_dict[gene_callers_id][header]
KeyError: 1147349
|
KeyError
|
def __init__(
    self,
    contigs_db_path,
    sources=set([]),
    split_names_of_interest=set([]),
    init=True,
    run=run,
    progress=progress,
    bin_name=None,
):
    """Set up empty HMM-hit dictionaries and (optionally) fill them from a contigs db.

    When `contigs_db_path` is given, `init_dicts` is called immediately and the
    instance is flagged as initialized; otherwise all dictionaries stay empty.
    `bin_name` is only used to make some output messages more informative.
    """
    self.run = run
    self.progress = progress

    # both filter arguments must be sets
    if not isinstance(sources, set):
        raise ConfigError("'sources' variable has to be a set instance.")
    if not isinstance(split_names_of_interest, set):
        raise ConfigError(
            "'split_names_of_interest' variable has to be a set instance."
        )

    self.bin_name = bin_name  # this is only relevant for some output messages
    self.sources = {s for s in sources if s}  # drop empty/None source names

    # per-database dictionaries, populated later by `init_dicts`
    for attribute in (
        "hmm_hits",
        "hmm_hits_info",
        "hmm_hits_splits",
        "contig_sequences",
        "aa_sequences",
        "genes_in_contigs",
        "splits_in_contigs",
    ):
        setattr(self, attribute, {})

    if contigs_db_path:
        self.init_dicts(contigs_db_path, split_names_of_interest)
        self.initialized = True
    else:
        self.initialized = False
|
def __init__(
    self,
    contigs_db_path,
    sources=set([]),
    split_names_of_interest=set([]),
    init=True,
    run=run,
    progress=progress,
):
    """Set up empty HMM-hit dictionaries and (optionally) fill them from a contigs db.

    When `contigs_db_path` is given, `init_dicts` is called immediately and the
    instance is flagged as initialized; otherwise all dictionaries stay empty.
    """
    self.run = run
    self.progress = progress

    # both filter arguments must be sets
    if not isinstance(sources, set):
        raise ConfigError("'sources' variable has to be a set instance.")
    if not isinstance(split_names_of_interest, set):
        raise ConfigError(
            "'split_names_of_interest' variable has to be a set instance."
        )

    self.sources = {s for s in sources if s}  # drop empty/None source names

    # per-database dictionaries, populated later by `init_dicts`
    for attribute in (
        "hmm_hits",
        "hmm_hits_info",
        "hmm_hits_splits",
        "contig_sequences",
        "aa_sequences",
        "genes_in_contigs",
        "splits_in_contigs",
    ):
        setattr(self, attribute, {})

    if contigs_db_path:
        self.init_dicts(contigs_db_path, split_names_of_interest)
        self.initialized = True
    else:
        self.initialized = False
|
https://github.com/merenlab/anvio/issues/1593
|
[09 Dec 20 09:32:25 Summarizing 104 of 159: 'Bin_3_32'] Creating the FASTA file ... ETA: 1m18sTraceback (most recent call last):
File "/workdir/lab_envs/anvio/bin/anvi-summarize", line 118, in <module>
main(args)
File "/workdir/lab_envs/anvio/bin/anvi-summarize", line 63, in main
summary.process()
File "/workdir/lab_envs/anvio/lib/python3.6/site-packages/anvio/summarizer.py", line 874, in process
self.summary['collection'][bin_id] = bin.create()
File "/workdir/lab_envs/anvio/lib/python3.6/site-packages/anvio/summarizer.py", line 1273, in create
self.store_sequences_for_hmm_hits()
File "/workdir/lab_envs/anvio/lib/python3.6/site-packages/anvio/summarizer.py", line 1533, in store_sequences_for_hmm_hits
hmm_sequences_dict = s.get_sequences_dict_for_hmm_hits_in_splits({self.bin_id: self.split_names})
File "/workdir/lab_envs/anvio/lib/python3.6/site-packages/anvio/hmmops.py", line 336, in get_sequences_dict_for_hmm_hits_in_splits
hmm_hit = self.hmm_hits[split_entry['hmm_hit_entry_id']]
KeyError: 17702
|
KeyError
|
def init_dicts(self, contigs_db_path, split_names_of_interest=set([])):
    """Initialize essential data for HMM stuff.

    This function will do its best to not load any data that will not
    be used later for HMM related operations. For instance, it will
    learn which gene caller ids are of interest based on HMM sources,
    and only recover data for splits and contigs based on that information,
    not accessing a large fraction of a given contigs database.

    Populates `self.hmm_hits_info`, `self.hmm_hits`, `self.hmm_hits_splits`,
    `self.contig_sequences`, `self.aa_sequences`, `self.genes_in_contigs`,
    and `self.splits_in_contigs`. When `split_names_of_interest` is given,
    HMM hits that are not fully contained in those splits are dropped, and
    the user is warned about fragmented hits that had to be discarded.
    """
    utils.is_contigs_db(contigs_db_path)
    contigs_db = db.DB(
        contigs_db_path,
        anvio.__contigs__version__,
        run=self.run,
        progress=self.progress,
    )
    # learn which HMM sources the database knows about, and complain if the
    # caller asked for any that are missing
    self.hmm_hits_info = contigs_db.get_table_as_dict(t.hmm_hits_info_table_name)
    missing_sources = [s for s in self.sources if s not in self.hmm_hits_info]
    if len(missing_sources):
        contigs_db.disconnect()
        # NOTE(review): this calls the module-level `progress`, not
        # `self.progress` — confirm that is intentional
        progress.reset()
        raise ConfigError(
            "Bad news, Houston :/ The contigs database '%s' is missing one or more HMM sources "
            "that you wished it didn't: '%s'."
            % (contigs_db_path, ", ".join(missing_sources))
        )
    # an empty `self.sources` means "use every source in the database"
    if not self.sources:
        self.sources = set(list(self.hmm_hits_info.keys()))
    if not self.sources:
        # there is nothing to initialize..
        return
    self.progress.new("Recovering sequences for HMM Hits")
    self.progress.update("...")
    # get data from HMM tables based on sources of interest
    self.progress.update(
        "Getting data from HMM tables for %d source(s)" % len(self.sources)
    )
    where_clause_for_sources = "source in (%s)" % ", ".join(
        ['"%s"' % s for s in self.sources]
    )
    self.hmm_hits = contigs_db.get_some_rows_from_table_as_dict(
        t.hmm_hits_table_name,
        where_clause=where_clause_for_sources,
        error_if_no_data=False,
    )
    self.hmm_hits_splits = contigs_db.get_some_rows_from_table_as_dict(
        t.hmm_hits_splits_table_name,
        where_clause=where_clause_for_sources,
        error_if_no_data=False,
    )
    # if the user sent a split names of interest, it means they are interested in hits that only occur
    # in a specific set of split names. NOTE: this really makes it very difficult to deal with HMM hits
    # that span through multiple splits. here we will mark such HMM hits in `hmm_hits_splits_entry_ids_to_remove`
    # for removal, but we will also keep track of those guys and let the user know what happened.
    if len(split_names_of_interest):
        # number of distinct split names with HMM hits before filtering (for reporting)
        total_num_split_names = len(
            set(
                self.hmm_hits_splits[entry_id]["split"]
                for entry_id in self.hmm_hits_splits
            )
        )
        hmm_hits_splits_entry_ids_to_remove = set([])
        hmm_hits_entry_ids_to_remove = set([])
        hmm_hits_entry_ids_associated_with_fragmented_hmm_hits = set([])
        hmm_sources_associated_with_fragmented_hmm_hits = set([])
        for entry_id in self.hmm_hits_splits:
            if self.hmm_hits_splits[entry_id]["split"] not in split_names_of_interest:
                hmm_hits_splits_entry_ids_to_remove.add(entry_id)
                hmm_hits_entry_ids_to_remove.add(
                    self.hmm_hits_splits[entry_id]["hmm_hit_entry_id"]
                )
                if not self.hmm_hits_splits[entry_id]["percentage_in_split"] == 100:
                    # this is important. if we are here, there is a bit more to do since it means that
                    # the split name associated with self.hmm_hits_splits[entry_id] is not in
                    # `split_names_of_interest`. but since the `percentage_in_split` for this HMM hit is NOT
                    # 100%, it could be the case that other splits that contain pieces of this HMM hit may
                    # still be in `split_names_of_interest`. But here we are setting the stage for this
                    # HMM hit to be removed from `self.hmm_hits` altogether. To make things right, we must
                    # go through `hmm_hits_splits`, and remove remaining entries there that are associated with
                    # this HMM hit later using the contents of this variable:
                    hmm_hits_entry_ids_associated_with_fragmented_hmm_hits.add(
                        self.hmm_hits_splits[entry_id]["hmm_hit_entry_id"]
                    )
                    hmm_sources_associated_with_fragmented_hmm_hits.add(
                        self.hmm_hits_splits[entry_id]["source"]
                    )
        if len(hmm_hits_entry_ids_associated_with_fragmented_hmm_hits):
            # if we are here, we will have to update `hmm_hits_splits_entry_ids_to_remove` carefully:
            # every remaining `hmm_hits_splits` entry that belongs to a fragmented hit goes, too
            additional_entry_ids_to_be_removed = set(
                [
                    e
                    for e in self.hmm_hits_splits
                    if self.hmm_hits_splits[e]["hmm_hit_entry_id"]
                    in hmm_hits_entry_ids_associated_with_fragmented_hmm_hits
                ]
            )
            hmm_hits_splits_entry_ids_to_remove.update(
                additional_entry_ids_to_be_removed
            )
            # let's warn the user while we're at it so they panic, too.
            progress.reset()
            if self.bin_name:
                header = f"A WARNING RELATED TO HMMs IN '{self.bin_name}'"
            else:
                # (f-string without placeholders; kept as-is)
                header = f"WARNING"
            self.run.warning(
                f"While anvi'o was trying to finalize HMM hits associated with splits of interest, "
                f"it realized that there were one or more HMM hits that spanned through multiple splits "
                f"yet not all of those splits were among the splits of interest. This can happen if you "
                f"refined a contig by excluding some of its splits either manually during binning or "
                f"automatically during whatever black magic you were engaged in. Anvi'o does not judge. But "
                f"In these situations anvi'o excludes the entire HMM hit from being reported to be on the "
                f"safe side. Some HMM hits coming from {P('HMM source', len(hmm_sources_associated_with_fragmented_hmm_hits))} "
                f'("{", ".join(hmm_sources_associated_with_fragmented_hmm_hits)}"), will not appear in your '
                f"downstream analyses (including in your `anvi-summarize` outputs). If you are really really "
                f"interested in those partial HMM hits and sequences associated with them, there are multiple "
                f"ways to recover them (one of the best way to do it involves the use of `anvi-split` and "
                f"re-running HMMs in your final bins). Please feel free to reach out to the anvi'o community "
                f"for ideas.",
                header=header,
            )
        # actually remove everything marked above, keeping `hmm_hits` and
        # `hmm_hits_splits` consistent with each other
        if len(hmm_hits_splits_entry_ids_to_remove):
            for entry_id in hmm_hits_splits_entry_ids_to_remove:
                self.hmm_hits_splits.pop(entry_id)
        if len(hmm_hits_entry_ids_to_remove):
            for entry_id in hmm_hits_entry_ids_to_remove:
                self.hmm_hits.pop(entry_id)
        # number of distinct split names with HMM hits after filtering (for reporting)
        filtered_num_split_names = len(
            set(
                self.hmm_hits_splits[entry_id]["split"]
                for entry_id in self.hmm_hits_splits
            )
        )
        if anvio.DEBUG:
            self.progress.end()
            self.run.warning(None, header="SequencesForHMMHits info")
            self.run.info_single(
                "%d split names of interest are found" % len(split_names_of_interest)
            )
            self.run.info("Total split names w/HMM hits", total_num_split_names)
            self.run.info(
                "Final split names w/HMM hits", filtered_num_split_names, nl_after=1
            )
            self.progress.new("Recovering sequences for HMM Hits")
            self.progress.update("...")
    if not len(self.hmm_hits):
        # there are HMMs but no hits. FINE.
        self.progress.end()
        contigs_db.disconnect()
        return
    # from here on, only recover data relevant to the surviving HMM hits
    gene_caller_ids_of_interest = set(
        [e["gene_callers_id"] for e in self.hmm_hits.values()]
    )
    where_clause_for_genes = "gene_callers_id in (%s)" % ", ".join(
        ["%d" % g for g in gene_caller_ids_of_interest]
    )
    self.progress.update(
        "Recovering split and contig names for %d genes"
        % (len(gene_caller_ids_of_interest))
    )
    # note: `split_names_of_interest` is re-bound here to the splits that
    # actually carry the genes of interest
    split_names_of_interest, contig_names_of_interest = (
        utils.get_split_and_contig_names_of_interest(
            contigs_db_path, gene_caller_ids_of_interest
        )
    )
    self.progress.update(
        "Recovering contig seqs for %d genes" % (len(gene_caller_ids_of_interest))
    )
    where_clause_for_contigs = "contig in (%s)" % ", ".join(
        ['"%s"' % s for s in contig_names_of_interest]
    )
    self.contig_sequences = contigs_db.get_some_rows_from_table_as_dict(
        t.contig_sequences_table_name,
        string_the_key=True,
        where_clause=where_clause_for_contigs,
    )
    self.progress.update(
        "Recovering amino acid seqs for %d genes" % (len(gene_caller_ids_of_interest))
    )
    self.aa_sequences = contigs_db.get_some_rows_from_table_as_dict(
        t.gene_amino_acid_sequences_table_name, where_clause=where_clause_for_genes
    )
    self.genes_in_contigs = contigs_db.get_some_rows_from_table_as_dict(
        t.genes_in_contigs_table_name, where_clause=where_clause_for_genes
    )
    self.splits_in_contigs = list(split_names_of_interest)
    self.progress.end()
    contigs_db.disconnect()
|
def init_dicts(self, contigs_db_path, split_names_of_interest=set([])):
    """Initialize essential data for HMM stuff.

    This function will do its best to not load any data that will not
    be used later for HMM related operations. For instance, it will
    learn which gene caller ids are of interest based on HMM sources,
    and only recover data for splits and contigs based on that information,
    not accessing a large fraction of a given contigs database.

    Populates `self.hmm_hits_info`, `self.hmm_hits`, `self.hmm_hits_splits`,
    `self.contig_sequences`, `self.aa_sequences`, `self.genes_in_contigs`,
    and `self.splits_in_contigs`.
    """
    utils.is_contigs_db(contigs_db_path)
    contigs_db = db.DB(
        contigs_db_path,
        anvio.__contigs__version__,
        run=self.run,
        progress=self.progress,
    )
    # learn which HMM sources the database knows about, and complain if the
    # caller asked for any that are missing
    self.hmm_hits_info = contigs_db.get_table_as_dict(t.hmm_hits_info_table_name)
    missing_sources = [s for s in self.sources if s not in self.hmm_hits_info]
    if len(missing_sources):
        contigs_db.disconnect()
        # NOTE(review): this calls the module-level `progress`, not
        # `self.progress` — confirm that is intentional
        progress.reset()
        raise ConfigError(
            "Bad news, Houston :/ The contigs database '%s' is missing one or more HMM sources "
            "that you wished it didn't: '%s'."
            % (contigs_db_path, ", ".join(missing_sources))
        )
    # an empty `self.sources` means "use every source in the database"
    if not self.sources:
        self.sources = set(list(self.hmm_hits_info.keys()))
    if not self.sources:
        # there is nothing to initialize..
        return
    self.progress.new("Recovering sequences for HMM Hits")
    self.progress.update("...")
    # get data from HMM tables based on sources of interest
    self.progress.update(
        "Getting data from HMM tables for %d source(s)" % len(self.sources)
    )
    where_clause_for_sources = "source in (%s)" % ", ".join(
        ['"%s"' % s for s in self.sources]
    )
    self.hmm_hits = contigs_db.get_some_rows_from_table_as_dict(
        t.hmm_hits_table_name,
        where_clause=where_clause_for_sources,
        error_if_no_data=False,
    )
    self.hmm_hits_splits = contigs_db.get_some_rows_from_table_as_dict(
        t.hmm_hits_splits_table_name,
        where_clause=where_clause_for_sources,
        error_if_no_data=False,
    )
    # if the user sent a split names of interest, it means they are interested in hits that only occur
    # in a specific set of split names.
    if len(split_names_of_interest):
        # number of distinct split names with HMM hits before filtering (for reporting)
        total_num_split_names = len(
            set(
                self.hmm_hits_splits[entry_id]["split"]
                for entry_id in self.hmm_hits_splits
            )
        )
        hmm_hits_splits_entry_ids_to_remove = set([])
        hmm_hits_entry_ids_to_remove = set([])
        for entry_id in self.hmm_hits_splits:
            if self.hmm_hits_splits[entry_id]["split"] not in split_names_of_interest:
                # NOTE(review): when an HMM hit spans multiple splits and only
                # SOME of them fall outside `split_names_of_interest`, the hit
                # is removed from `self.hmm_hits` below while its remaining
                # entries survive in `self.hmm_hits_splits` — downstream code
                # that follows `hmm_hit_entry_id` can then raise a KeyError
                # (see https://github.com/merenlab/anvio/issues/1593).
                hmm_hits_splits_entry_ids_to_remove.add(entry_id)
                hmm_hits_entry_ids_to_remove.add(
                    self.hmm_hits_splits[entry_id]["hmm_hit_entry_id"]
                )
        if len(hmm_hits_splits_entry_ids_to_remove):
            for entry_id in hmm_hits_splits_entry_ids_to_remove:
                self.hmm_hits_splits.pop(entry_id)
        if len(hmm_hits_entry_ids_to_remove):
            for entry_id in hmm_hits_entry_ids_to_remove:
                self.hmm_hits.pop(entry_id)
        # number of distinct split names with HMM hits after filtering (for reporting)
        filtered_num_split_names = len(
            set(
                self.hmm_hits_splits[entry_id]["split"]
                for entry_id in self.hmm_hits_splits
            )
        )
        if anvio.DEBUG:
            self.progress.end()
            self.run.warning(None, header="SequencesForHMMHits info")
            self.run.info_single(
                "%d split names of interest are found" % len(split_names_of_interest)
            )
            self.run.info("Total split names w/HMM hits", total_num_split_names)
            self.run.info(
                "Final split names w/HMM hits", filtered_num_split_names, nl_after=1
            )
            self.progress.new("Recovering sequences for HMM Hits")
            self.progress.update("...")
    if not len(self.hmm_hits):
        # there are HMMs but no hits. FINE.
        self.progress.end()
        contigs_db.disconnect()
        return
    # from here on, only recover data relevant to the surviving HMM hits
    gene_caller_ids_of_interest = set(
        [e["gene_callers_id"] for e in self.hmm_hits.values()]
    )
    where_clause_for_genes = "gene_callers_id in (%s)" % ", ".join(
        ["%d" % g for g in gene_caller_ids_of_interest]
    )
    self.progress.update(
        "Recovering split and contig names for %d genes"
        % (len(gene_caller_ids_of_interest))
    )
    # note: `split_names_of_interest` is re-bound here to the splits that
    # actually carry the genes of interest
    split_names_of_interest, contig_names_of_interest = (
        utils.get_split_and_contig_names_of_interest(
            contigs_db_path, gene_caller_ids_of_interest
        )
    )
    self.progress.update(
        "Recovering contig seqs for %d genes" % (len(gene_caller_ids_of_interest))
    )
    where_clause_for_contigs = "contig in (%s)" % ", ".join(
        ['"%s"' % s for s in contig_names_of_interest]
    )
    self.contig_sequences = contigs_db.get_some_rows_from_table_as_dict(
        t.contig_sequences_table_name,
        string_the_key=True,
        where_clause=where_clause_for_contigs,
    )
    self.progress.update(
        "Recovering amino acid seqs for %d genes" % (len(gene_caller_ids_of_interest))
    )
    self.aa_sequences = contigs_db.get_some_rows_from_table_as_dict(
        t.gene_amino_acid_sequences_table_name, where_clause=where_clause_for_genes
    )
    self.genes_in_contigs = contigs_db.get_some_rows_from_table_as_dict(
        t.genes_in_contigs_table_name, where_clause=where_clause_for_genes
    )
    self.splits_in_contigs = list(split_names_of_interest)
    self.progress.end()
    contigs_db.disconnect()
|
https://github.com/merenlab/anvio/issues/1593
|
[09 Dec 20 09:32:25 Summarizing 104 of 159: 'Bin_3_32'] Creating the FASTA file ... ETA: 1m18sTraceback (most recent call last):
File "/workdir/lab_envs/anvio/bin/anvi-summarize", line 118, in <module>
main(args)
File "/workdir/lab_envs/anvio/bin/anvi-summarize", line 63, in main
summary.process()
File "/workdir/lab_envs/anvio/lib/python3.6/site-packages/anvio/summarizer.py", line 874, in process
self.summary['collection'][bin_id] = bin.create()
File "/workdir/lab_envs/anvio/lib/python3.6/site-packages/anvio/summarizer.py", line 1273, in create
self.store_sequences_for_hmm_hits()
File "/workdir/lab_envs/anvio/lib/python3.6/site-packages/anvio/summarizer.py", line 1533, in store_sequences_for_hmm_hits
hmm_sequences_dict = s.get_sequences_dict_for_hmm_hits_in_splits({self.bin_id: self.split_names})
File "/workdir/lab_envs/anvio/lib/python3.6/site-packages/anvio/hmmops.py", line 336, in get_sequences_dict_for_hmm_hits_in_splits
hmm_hit = self.hmm_hits[split_entry['hmm_hit_entry_id']]
KeyError: 17702
|
KeyError
|
def store_sequences_for_hmm_hits(self):
    """Write per-HMM-source FASTA files of hit sequences for this bin."""
    # quick summaries skip sequence reporting entirely
    if self.summary.quick:
        return

    hmm_seqs = SequencesForHMMHits(
        self.summary.contigs_db_path,
        split_names_of_interest=self.split_names,
        progress=progress_quiet,
        bin_name=self.bin_id,
    )
    seqs_dict = hmm_seqs.get_sequences_dict_for_hmm_hits_in_splits(
        {self.bin_id: self.split_names}
    )

    if self.summary.reformat_contig_names:
        # swap original contig names for their reformatted counterparts
        for entry in seqs_dict.values():
            conversion = self.contig_name_conversion_dict[entry["contig"]]
            entry["contig"] = conversion["reformatted_contig_name"]

    scg_sources = [source for _, source in self.summary.hmm_searches_header]
    other_sources = self.summary.completeness.sources

    for source in scg_sources + other_sources:
        # one output file per HMM source, containing only that source's hits
        hits_from_source = utils.get_filtered_dict(seqs_dict, "source", set([source]))
        fasta = self.get_output_file_handle(
            "%s-hmm-sequences.txt" % source, key=source
        )
        for gene_unique_id in hits_from_source:
            header, sequence = hmm_seqs.get_FASTA_header_and_sequence_for_gene_unique_id(
                seqs_dict, gene_unique_id
            )
            fasta.write(">%s\n%s\n" % (header, sequence))
|
def store_sequences_for_hmm_hits(self):
    """Report HMM hit sequences for this bin as per-source FASTA files."""
    if self.summary.quick:
        # nothing to do for quick summaries
        return

    seq_source = SequencesForHMMHits(
        self.summary.contigs_db_path,
        split_names_of_interest=self.split_names,
        progress=progress_quiet,
    )
    sequences = seq_source.get_sequences_dict_for_hmm_hits_in_splits(
        {self.bin_id: self.split_names}
    )

    if self.summary.reformat_contig_names:
        # replace each contig name with its reformatted counterpart
        for entry_id in sequences:
            original_name = sequences[entry_id]["contig"]
            sequences[entry_id]["contig"] = self.contig_name_conversion_dict[
                original_name
            ]["reformatted_contig_name"]

    # sources to report: single-copy gene HMMs followed by everything else
    all_sources = [
        source for _, source in self.summary.hmm_searches_header
    ] + self.summary.completeness.sources

    for source in all_sources:
        subset = utils.get_filtered_dict(sequences, "source", set([source]))
        handle = self.get_output_file_handle(
            "%s-hmm-sequences.txt" % source, key=source
        )
        for unique_id in subset:
            header, sequence = seq_source.get_FASTA_header_and_sequence_for_gene_unique_id(
                sequences, unique_id
            )
            handle.write(">%s\n%s\n" % (header, sequence))
|
https://github.com/merenlab/anvio/issues/1593
|
[09 Dec 20 09:32:25 Summarizing 104 of 159: 'Bin_3_32'] Creating the FASTA file ... ETA: 1m18sTraceback (most recent call last):
File "/workdir/lab_envs/anvio/bin/anvi-summarize", line 118, in <module>
main(args)
File "/workdir/lab_envs/anvio/bin/anvi-summarize", line 63, in main
summary.process()
File "/workdir/lab_envs/anvio/lib/python3.6/site-packages/anvio/summarizer.py", line 874, in process
self.summary['collection'][bin_id] = bin.create()
File "/workdir/lab_envs/anvio/lib/python3.6/site-packages/anvio/summarizer.py", line 1273, in create
self.store_sequences_for_hmm_hits()
File "/workdir/lab_envs/anvio/lib/python3.6/site-packages/anvio/summarizer.py", line 1533, in store_sequences_for_hmm_hits
hmm_sequences_dict = s.get_sequences_dict_for_hmm_hits_in_splits({self.bin_id: self.split_names})
File "/workdir/lab_envs/anvio/lib/python3.6/site-packages/anvio/hmmops.py", line 336, in get_sequences_dict_for_hmm_hits_in_splits
hmm_hit = self.hmm_hits[split_entry['hmm_hit_entry_id']]
KeyError: 17702
|
KeyError
|
def do_contigs_db(self):
    """Generate a self-contained contigs database for this bin.

    Creates an empty contigs database at `self.bin_contigs_db_path`,
    copies over tables that stay the same between parent and bin, updates
    the self-table metadata, creates the k-mer tables, migrates
    bin-specific rows from the parent database, and finally removes HMM
    hits that are only partially described by this bin's splits (see
    https://github.com/merenlab/anvio/issues/1593).
    """
    self.progress.new('Splitting "%s"' % self.bin_id)
    self.progress.update("Subsetting the contigs database")
    bin_contigs_db = dbops.ContigsDatabase(self.bin_contigs_db_path)
    bin_contigs_db.touch()
    # copy-paste tables that will largely stay the same from the parent
    bin_contigs_db.db.copy_paste(table_name="self", source_db_path=self.contigs_db_path)
    bin_contigs_db.db.copy_paste(
        table_name="hmm_hits_info", source_db_path=self.contigs_db_path
    )
    bin_contigs_db.db.copy_paste(
        table_name="taxon_names", source_db_path=self.contigs_db_path
    )
    # update some variables in the self table:
    self.contigs_db_hash = bin_contigs_db.get_hash()
    bin_contigs_db.db.update_meta_value("num_contigs", self.num_contigs)
    bin_contigs_db.db.update_meta_value("num_splits", self.num_splits)
    bin_contigs_db.db.update_meta_value("total_length", self.total_length)
    bin_contigs_db.db.update_meta_value("creation_date", bin_contigs_db.get_date())
    bin_contigs_db.db.update_meta_value("contigs_db_hash", self.contigs_db_hash)
    bin_contigs_db.db.update_meta_value("project_name", self.bin_id)
    # the empty contigs db is ready
    bin_contigs_db.disconnect()
    # touch does not create the k-mers tables, so the resulting contigs db is missing them. we
    # will add them to the db here.
    bin_contigs_db = dbops.ContigsDatabase(self.bin_contigs_db_path)
    k = KMerTablesForContigsAndSplits(None, k=bin_contigs_db.meta["kmer_size"])
    for table_name in ["kmer_contigs", "kmer_splits"]:
        bin_contigs_db.db.create_table(
            table_name, k.kmers_table_structure, k.kmers_table_types
        )
    bin_contigs_db.disconnect()
    # setup the filtering rules for migrating data: each table is filtered
    # on a (column, allowed-values) pair
    tables = {
        t.contig_sequences_table_name: ("contig", self.contig_names),
        t.contigs_info_table_name: ("contig", self.contig_names),
        t.gene_function_calls_table_name: ("gene_callers_id", self.gene_caller_ids),
        t.gene_amino_acid_sequences_table_name: (
            "gene_callers_id",
            self.gene_caller_ids,
        ),
        t.genes_in_contigs_table_name: ("gene_callers_id", self.gene_caller_ids),
        t.genes_in_splits_table_name: ("gene_callers_id", self.gene_caller_ids),
        t.genes_taxonomy_table_name: ("gene_callers_id", self.gene_caller_ids),
        t.hmm_hits_table_name: ("gene_callers_id", self.gene_caller_ids),
        t.hmm_hits_splits_table_name: ("split", self.split_names),
        t.splits_info_table_name: ("split", self.split_names),
        t.splits_taxonomy_table_name: ("split", self.split_names),
        t.nt_position_info_table_name: ("contig_name", self.contig_names),
        t.scg_taxonomy_table_name: ("gene_callers_id", self.gene_caller_ids),
        "kmer_contigs": ("contig", self.split_names),
        "kmer_splits": ("contig", self.split_names),
    }
    self.migrate_data(tables, self.contigs_db_path, self.bin_contigs_db_path)
    # We're done here in theory, but there is one more thing to do due to reasons partially explained in
    # issue https://github.com/merenlab/anvio/issues/1593 and PR https://github.com/merenlab/anvio/pull/1595.
    # The solution presented in the PR does not apply to split projects. so here we will calculate
    # what percentage of HMM hits are in splits described in this bin, and remove those that are less
    # than 100%.
    bin_contigs_db = dbops.ContigsDatabase(self.bin_contigs_db_path)
    hmm_hits_in_splits_dict = bin_contigs_db.db.get_table_as_dict(
        t.hmm_hits_splits_table_name
    )
    # the purpose of the following dict is to keep track of what total percentage of a given HMM hit is
    # described by all contig splits involved in this bin
    hmm_hits_id_percentage_described_dict = Counter({})
    for entry in hmm_hits_in_splits_dict.values():
        hmm_hits_id_percentage_described_dict[entry["hmm_hit_entry_id"]] += entry[
            "percentage_in_split"
        ]
    # now the `hmm_hits_id_percentage_described_dict` looks like this:
    #
    # {2: 100, 3: 100.0, 5: 90.86727989487517, 6: 99.99999999999999, 4: 63.99858956276446}
    #
    # HMM hit ids that need to be cleared out from the `hmm_hits_in_splits` table are clear: 5 and 4, in this
    # example. But the problem is, due to floating point logistics, in some cases things are not quite 100%,
    # although in reality they are, hence the need for `round`ing the percentages below.
    hmm_hit_ids_to_delete = [
        hit_id
        for hit_id in hmm_hits_id_percentage_described_dict
        if round(hmm_hits_id_percentage_described_dict[hit_id]) < 100
    ]
    # only issue the DELETE when there is something to remove: an empty
    # `IN ()` list is a SQLite-only SQL extension, and skipping the call
    # avoids a pointless write to the database when nothing matched
    if len(hmm_hit_ids_to_delete):
        where_clause = (
            f"hmm_hit_entry_id IN ({','.join([str(i) for i in hmm_hit_ids_to_delete])})"
        )
        bin_contigs_db.db.remove_some_rows_from_table(
            t.hmm_hits_splits_table_name, where_clause=where_clause
        )
    # close the last handle we opened, consistent with the earlier uses above
    bin_contigs_db.disconnect()
    self.progress.end()
|
def do_contigs_db(self):
    """Generate a self-contained contigs database for this bin.

    Creates an empty contigs database at `self.bin_contigs_db_path`,
    copies over tables that stay the same between parent and bin, updates
    the self-table metadata, creates the k-mer tables, and migrates
    bin-specific rows from the parent database.
    """
    self.progress.new('Splitting "%s"' % self.bin_id)
    self.progress.update("Subsetting the contigs database")
    bin_contigs_db = dbops.ContigsDatabase(self.bin_contigs_db_path)
    bin_contigs_db.touch()
    # copy-paste tables that will largely stay the same from the parent
    bin_contigs_db.db.copy_paste(table_name="self", source_db_path=self.contigs_db_path)
    bin_contigs_db.db.copy_paste(
        table_name="hmm_hits_info", source_db_path=self.contigs_db_path
    )
    bin_contigs_db.db.copy_paste(
        table_name="taxon_names", source_db_path=self.contigs_db_path
    )
    # update some variables in the self table:
    self.contigs_db_hash = bin_contigs_db.get_hash()
    bin_contigs_db.db.update_meta_value("num_contigs", self.num_contigs)
    bin_contigs_db.db.update_meta_value("num_splits", self.num_splits)
    bin_contigs_db.db.update_meta_value("total_length", self.total_length)
    bin_contigs_db.db.update_meta_value("creation_date", bin_contigs_db.get_date())
    bin_contigs_db.db.update_meta_value("contigs_db_hash", self.contigs_db_hash)
    bin_contigs_db.db.update_meta_value("project_name", self.bin_id)
    # the empty contigs db is ready
    bin_contigs_db.disconnect()
    # touch does not create the k-mers tables, so the resulting contigs db is missing them. we
    # will add them to the db here.
    bin_contigs_db = dbops.ContigsDatabase(self.bin_contigs_db_path)
    k = KMerTablesForContigsAndSplits(None, k=bin_contigs_db.meta["kmer_size"])
    for table_name in ["kmer_contigs", "kmer_splits"]:
        bin_contigs_db.db.create_table(
            table_name, k.kmers_table_structure, k.kmers_table_types
        )
    bin_contigs_db.disconnect()
    # setup the filtering rules for migrating data: each table is filtered
    # on a (column, allowed-values) pair.
    # NOTE(review): HMM hits that span splits outside this bin are migrated
    # as-is here, which can later surface as a KeyError downstream — see
    # https://github.com/merenlab/anvio/issues/1593.
    # NOTE(review): the kmer tables are filtered on their "contig" column
    # using *split* names — presumably intentional given how anvi'o stores
    # k-mers, but worth confirming.
    tables = {
        t.contig_sequences_table_name: ("contig", self.contig_names),
        t.contigs_info_table_name: ("contig", self.contig_names),
        t.gene_function_calls_table_name: ("gene_callers_id", self.gene_caller_ids),
        t.gene_amino_acid_sequences_table_name: (
            "gene_callers_id",
            self.gene_caller_ids,
        ),
        t.genes_in_contigs_table_name: ("gene_callers_id", self.gene_caller_ids),
        t.genes_in_splits_table_name: ("gene_callers_id", self.gene_caller_ids),
        t.genes_taxonomy_table_name: ("gene_callers_id", self.gene_caller_ids),
        t.hmm_hits_table_name: ("gene_callers_id", self.gene_caller_ids),
        t.hmm_hits_splits_table_name: ("split", self.split_names),
        t.splits_info_table_name: ("split", self.split_names),
        t.splits_taxonomy_table_name: ("split", self.split_names),
        t.nt_position_info_table_name: ("contig_name", self.contig_names),
        t.scg_taxonomy_table_name: ("gene_callers_id", self.gene_caller_ids),
        "kmer_contigs": ("contig", self.split_names),
        "kmer_splits": ("contig", self.split_names),
    }
    self.migrate_data(tables, self.contigs_db_path, self.bin_contigs_db_path)
    self.progress.end()
|
https://github.com/merenlab/anvio/issues/1593
|
[09 Dec 20 09:32:25 Summarizing 104 of 159: 'Bin_3_32'] Creating the FASTA file ... ETA: 1m18sTraceback (most recent call last):
File "/workdir/lab_envs/anvio/bin/anvi-summarize", line 118, in <module>
main(args)
File "/workdir/lab_envs/anvio/bin/anvi-summarize", line 63, in main
summary.process()
File "/workdir/lab_envs/anvio/lib/python3.6/site-packages/anvio/summarizer.py", line 874, in process
self.summary['collection'][bin_id] = bin.create()
File "/workdir/lab_envs/anvio/lib/python3.6/site-packages/anvio/summarizer.py", line 1273, in create
self.store_sequences_for_hmm_hits()
File "/workdir/lab_envs/anvio/lib/python3.6/site-packages/anvio/summarizer.py", line 1533, in store_sequences_for_hmm_hits
hmm_sequences_dict = s.get_sequences_dict_for_hmm_hits_in_splits({self.bin_id: self.split_names})
File "/workdir/lab_envs/anvio/lib/python3.6/site-packages/anvio/hmmops.py", line 336, in get_sequences_dict_for_hmm_hits_in_splits
hmm_hit = self.hmm_hits[split_entry['hmm_hit_entry_id']]
KeyError: 17702
|
KeyError
|
def init_dicts(self, contigs_db_path, split_names_of_interest=set([])):
    """Initialize essential data for HMM stuff.

    This function will do its best to not load any data that will not
    be used later for HMM related operations. For instance, it will
    learn which gene caller ids are of interest based on HMM sources,
    and only recover data for splits and contigs based on that information,
    not accessing a large fraction of a given contigs database.

    Populates `self.hmm_hits_info`, `self.hmm_hits`, `self.hmm_hits_splits`,
    `self.contig_sequences`, `self.aa_sequences`, `self.genes_in_contigs`,
    and `self.splits_in_contigs`. When `split_names_of_interest` is given,
    HMM hits that are not fully contained in those splits are dropped.
    """
    utils.is_contigs_db(contigs_db_path)
    contigs_db = db.DB(
        contigs_db_path,
        anvio.__contigs__version__,
        run=self.run,
        progress=self.progress,
    )
    # learn which HMM sources the database knows about, and complain if the
    # caller asked for any that are missing
    self.hmm_hits_info = contigs_db.get_table_as_dict(t.hmm_hits_info_table_name)
    missing_sources = [s for s in self.sources if s not in self.hmm_hits_info]
    if len(missing_sources):
        contigs_db.disconnect()
        # NOTE(review): this calls the module-level `progress`, not
        # `self.progress` — confirm that is intentional
        progress.reset()
        raise ConfigError(
            "Bad news, Houston :/ The contigs database '%s' is missing one or more HMM sources "
            "that you wished it didn't: '%s'."
            % (contigs_db_path, ", ".join(missing_sources))
        )
    # an empty `self.sources` means "use every source in the database"
    if not self.sources:
        self.sources = set(list(self.hmm_hits_info.keys()))
    if not self.sources:
        # there is nothing to initialize..
        return
    self.progress.new("Recovering sequences for HMM Hits")
    self.progress.update("...")
    # get data from HMM tables based on sources of interest
    self.progress.update(
        "Getting data from HMM tables for %d source(s)" % len(self.sources)
    )
    where_clause_for_sources = "source in (%s)" % ", ".join(
        ['"%s"' % s for s in self.sources]
    )
    self.hmm_hits = contigs_db.get_some_rows_from_table_as_dict(
        t.hmm_hits_table_name,
        where_clause=where_clause_for_sources,
        error_if_no_data=False,
    )
    self.hmm_hits_splits = contigs_db.get_some_rows_from_table_as_dict(
        t.hmm_hits_splits_table_name,
        where_clause=where_clause_for_sources,
        error_if_no_data=False,
    )
    # if the user sent a split names of interest, it means they are interested in hits that only occur
    # in a specific set of split names. NOTE: this really makes it very difficult to deal with HMM hits
    # that span through multiple splits. here we will mark such HMM hits in `hmm_hits_splits_entry_ids_to_remove`
    # for removal, but we will also keep track of those guys and let the user know what happened.
    if len(split_names_of_interest):
        # number of distinct split names with HMM hits before filtering (for reporting)
        total_num_split_names = len(
            set(
                self.hmm_hits_splits[entry_id]["split"]
                for entry_id in self.hmm_hits_splits
            )
        )
        hmm_hits_splits_entry_ids_to_remove = set([])
        hmm_hits_entry_ids_to_remove = set([])
        hmm_hits_entry_ids_associated_with_fragmented_hmm_hits = set([])
        hmm_sources_associated_with_fragmented_hmm_hits = set([])
        for entry_id in self.hmm_hits_splits:
            if self.hmm_hits_splits[entry_id]["split"] not in split_names_of_interest:
                hmm_hits_splits_entry_ids_to_remove.add(entry_id)
                hmm_hits_entry_ids_to_remove.add(
                    self.hmm_hits_splits[entry_id]["hmm_hit_entry_id"]
                )
                if not self.hmm_hits_splits[entry_id]["percentage_in_split"] == 100:
                    # this is important. if we are here, there is a bit more to do since it means that
                    # the split name associated with self.hmm_hits_splits[entry_id] is not in
                    # `split_names_of_interest`. but since the `percentage_in_split` for this HMM hit is NOT
                    # 100%, it could be the case that other splits that contain pieces of this HMM hit may
                    # still be in `split_names_of_interest`. But here we are setting the stage for this
                    # HMM hit to be removed from `self.hmm_hits` altogether. To make things right, we must
                    # go through `hmm_hits_splits`, and remove remaining entries there that are associated with
                    # this HMM hit later using the contents of this variable:
                    hmm_hits_entry_ids_associated_with_fragmented_hmm_hits.add(
                        self.hmm_hits_splits[entry_id]["hmm_hit_entry_id"]
                    )
                    hmm_sources_associated_with_fragmented_hmm_hits.add(
                        self.hmm_hits_splits[entry_id]["source"]
                    )
        if len(hmm_hits_entry_ids_associated_with_fragmented_hmm_hits):
            # if we are here, we will have to update `hmm_hits_splits_entry_ids_to_remove` carefully:
            # every remaining `hmm_hits_splits` entry that belongs to a fragmented hit goes, too
            additional_entry_ids_to_be_removed = set(
                [
                    e
                    for e in self.hmm_hits_splits
                    if self.hmm_hits_splits[e]["hmm_hit_entry_id"]
                    in hmm_hits_entry_ids_associated_with_fragmented_hmm_hits
                ]
            )
            hmm_hits_splits_entry_ids_to_remove.update(
                additional_entry_ids_to_be_removed
            )
        # actually remove everything marked above, keeping `hmm_hits` and
        # `hmm_hits_splits` consistent with each other
        if len(hmm_hits_splits_entry_ids_to_remove):
            for entry_id in hmm_hits_splits_entry_ids_to_remove:
                self.hmm_hits_splits.pop(entry_id)
        if len(hmm_hits_entry_ids_to_remove):
            for entry_id in hmm_hits_entry_ids_to_remove:
                self.hmm_hits.pop(entry_id)
        # number of distinct split names with HMM hits after filtering (for reporting)
        filtered_num_split_names = len(
            set(
                self.hmm_hits_splits[entry_id]["split"]
                for entry_id in self.hmm_hits_splits
            )
        )
        if anvio.DEBUG:
            self.progress.end()
            self.run.warning(None, header="SequencesForHMMHits info")
            self.run.info_single(
                "%d split names of interest are found" % len(split_names_of_interest)
            )
            self.run.info("Total split names w/HMM hits", total_num_split_names)
            self.run.info(
                "Final split names w/HMM hits", filtered_num_split_names, nl_after=1
            )
            self.progress.new("Recovering sequences for HMM Hits")
            self.progress.update("...")
    if not len(self.hmm_hits):
        # there are HMMs but no hits. FINE.
        self.progress.end()
        contigs_db.disconnect()
        return
    # from here on, only recover data relevant to the surviving HMM hits
    gene_caller_ids_of_interest = set(
        [e["gene_callers_id"] for e in self.hmm_hits.values()]
    )
    where_clause_for_genes = "gene_callers_id in (%s)" % ", ".join(
        ["%d" % g for g in gene_caller_ids_of_interest]
    )
    self.progress.update(
        "Recovering split and contig names for %d genes"
        % (len(gene_caller_ids_of_interest))
    )
    # note: `split_names_of_interest` is re-bound here to the splits that
    # actually carry the genes of interest
    split_names_of_interest, contig_names_of_interest = (
        utils.get_split_and_contig_names_of_interest(
            contigs_db_path, gene_caller_ids_of_interest
        )
    )
    self.progress.update(
        "Recovering contig seqs for %d genes" % (len(gene_caller_ids_of_interest))
    )
    where_clause_for_contigs = "contig in (%s)" % ", ".join(
        ['"%s"' % s for s in contig_names_of_interest]
    )
    self.contig_sequences = contigs_db.get_some_rows_from_table_as_dict(
        t.contig_sequences_table_name,
        string_the_key=True,
        where_clause=where_clause_for_contigs,
    )
    self.progress.update(
        "Recovering amino acid seqs for %d genes" % (len(gene_caller_ids_of_interest))
    )
    self.aa_sequences = contigs_db.get_some_rows_from_table_as_dict(
        t.gene_amino_acid_sequences_table_name, where_clause=where_clause_for_genes
    )
    self.genes_in_contigs = contigs_db.get_some_rows_from_table_as_dict(
        t.genes_in_contigs_table_name, where_clause=where_clause_for_genes
    )
    self.splits_in_contigs = list(split_names_of_interest)
    self.progress.end()
    contigs_db.disconnect()
|
def init_dicts(self, contigs_db_path, split_names_of_interest=None):
    """Initialize essential data for HMM stuff.

    This function will do its best to not load any data that will not
    be used later for HMM related operations. For instance, it will
    learn which gene caller ids are of interest based on HMM sources,
    and only recover data for splits and contigs based on that information,
    not accessing a large fraction of a given contigs database.

    Parameters
    ==========
    contigs_db_path : str
        Path to an anvi'o contigs database.
    split_names_of_interest : set, optional
        When provided, HMM hits are limited to those that occur in these
        splits. A hit that has any fragment outside this set is removed
        entirely, from both `self.hmm_hits` and `self.hmm_hits_splits`,
        so the two dictionaries stay consistent.
    """
    # avoid the mutable-default-argument pitfall; `None` normalizes to an empty set
    split_names_of_interest = split_names_of_interest or set([])

    utils.is_contigs_db(contigs_db_path)

    contigs_db = db.DB(
        contigs_db_path,
        anvio.__contigs__version__,
        run=self.run,
        progress=self.progress,
    )

    self.hmm_hits_info = contigs_db.get_table_as_dict(t.hmm_hits_info_table_name)

    missing_sources = [s for s in self.sources if s not in self.hmm_hits_info]
    if len(missing_sources):
        contigs_db.disconnect()
        # use the instance progress object for consistency with the rest of
        # this function (a bare `progress` here relied on a module-level object)
        self.progress.reset()
        raise ConfigError(
            "Bad news, Houston :/ The contigs database '%s' is missing one or more HMM sources "
            "that you wished it didn't: '%s'."
            % (contigs_db_path, ", ".join(missing_sources))
        )

    if not self.sources:
        self.sources = set(list(self.hmm_hits_info.keys()))
    if not self.sources:
        # there is nothing to initialize..
        return

    self.progress.new("Recovering sequences for HMM Hits")
    self.progress.update("...")

    # get data from HMM tables based on sources of interest
    self.progress.update(
        "Getting data from HMM tables for %d source(s)" % len(self.sources)
    )
    where_clause_for_sources = "source in (%s)" % ", ".join(
        ['"%s"' % s for s in self.sources]
    )
    self.hmm_hits = contigs_db.get_some_rows_from_table_as_dict(
        t.hmm_hits_table_name,
        where_clause=where_clause_for_sources,
        error_if_no_data=False,
    )
    self.hmm_hits_splits = contigs_db.get_some_rows_from_table_as_dict(
        t.hmm_hits_splits_table_name,
        where_clause=where_clause_for_sources,
        error_if_no_data=False,
    )

    # if the user sent a split names of interest, it means they are interested in hits that only occur
    # in a specific set of split names. NOTE: this really makes it very difficult to deal with HMM hits
    # that span through multiple splits. such hits must be removed consistently from BOTH `self.hmm_hits`
    # and `self.hmm_hits_splits`; otherwise downstream lookups through `hmm_hit_entry_id` raise KeyError
    # (see https://github.com/merenlab/anvio/issues/1593).
    if len(split_names_of_interest):
        total_num_split_names = len(
            set(
                self.hmm_hits_splits[entry_id]["split"]
                for entry_id in self.hmm_hits_splits
            )
        )

        hmm_hits_splits_entry_ids_to_remove = set([])
        hmm_hits_entry_ids_to_remove = set([])
        for entry_id in self.hmm_hits_splits:
            if self.hmm_hits_splits[entry_id]["split"] not in split_names_of_interest:
                hmm_hits_splits_entry_ids_to_remove.add(entry_id)
                hmm_hits_entry_ids_to_remove.add(
                    self.hmm_hits_splits[entry_id]["hmm_hit_entry_id"]
                )

        if len(hmm_hits_splits_entry_ids_to_remove):
            for entry_id in hmm_hits_splits_entry_ids_to_remove:
                self.hmm_hits_splits.pop(entry_id)

        if len(hmm_hits_entry_ids_to_remove):
            # a removed hit may span multiple splits; drop every remaining
            # splits-table entry that still points to a removed hit so no
            # dangling `hmm_hit_entry_id` survives
            for entry_id in list(self.hmm_hits_splits):
                if (
                    self.hmm_hits_splits[entry_id]["hmm_hit_entry_id"]
                    in hmm_hits_entry_ids_to_remove
                ):
                    self.hmm_hits_splits.pop(entry_id)

            for entry_id in hmm_hits_entry_ids_to_remove:
                self.hmm_hits.pop(entry_id)

        filtered_num_split_names = len(
            set(
                self.hmm_hits_splits[entry_id]["split"]
                for entry_id in self.hmm_hits_splits
            )
        )

        if anvio.DEBUG:
            self.progress.end()
            self.run.warning(None, header="SequencesForHMMHits info")
            self.run.info_single(
                "%d split names of interest are found" % len(split_names_of_interest)
            )
            self.run.info("Total split names w/HMM hits", total_num_split_names)
            self.run.info(
                "Final split names w/HMM hits", filtered_num_split_names, nl_after=1
            )
            self.progress.new("Recovering sequences for HMM Hits")
            self.progress.update("...")

    if not len(self.hmm_hits):
        # there are HMMs but no hits. FINE.
        self.progress.end()
        contigs_db.disconnect()
        return

    gene_caller_ids_of_interest = set(
        [e["gene_callers_id"] for e in self.hmm_hits.values()]
    )
    where_clause_for_genes = "gene_callers_id in (%s)" % ", ".join(
        ["%d" % g for g in gene_caller_ids_of_interest]
    )

    self.progress.update(
        "Recovering split and contig names for %d genes"
        % (len(gene_caller_ids_of_interest))
    )
    split_names_of_interest, contig_names_of_interest = (
        utils.get_split_and_contig_names_of_interest(
            contigs_db_path, gene_caller_ids_of_interest
        )
    )

    self.progress.update(
        "Recovering contig seqs for %d genes" % (len(gene_caller_ids_of_interest))
    )
    where_clause_for_contigs = "contig in (%s)" % ", ".join(
        ['"%s"' % s for s in contig_names_of_interest]
    )
    self.contig_sequences = contigs_db.get_some_rows_from_table_as_dict(
        t.contig_sequences_table_name,
        string_the_key=True,
        where_clause=where_clause_for_contigs,
    )

    self.progress.update(
        "Recovering amino acid seqs for %d genes" % (len(gene_caller_ids_of_interest))
    )
    self.aa_sequences = contigs_db.get_some_rows_from_table_as_dict(
        t.gene_amino_acid_sequences_table_name, where_clause=where_clause_for_genes
    )

    self.genes_in_contigs = contigs_db.get_some_rows_from_table_as_dict(
        t.genes_in_contigs_table_name, where_clause=where_clause_for_genes
    )

    self.splits_in_contigs = list(split_names_of_interest)

    self.progress.end()
    contigs_db.disconnect()
|
https://github.com/merenlab/anvio/issues/1593
|
[09 Dec 20 09:32:25 Summarizing 104 of 159: 'Bin_3_32'] Creating the FASTA file ... ETA: 1m18sTraceback (most recent call last):
File "/workdir/lab_envs/anvio/bin/anvi-summarize", line 118, in <module>
main(args)
File "/workdir/lab_envs/anvio/bin/anvi-summarize", line 63, in main
summary.process()
File "/workdir/lab_envs/anvio/lib/python3.6/site-packages/anvio/summarizer.py", line 874, in process
self.summary['collection'][bin_id] = bin.create()
File "/workdir/lab_envs/anvio/lib/python3.6/site-packages/anvio/summarizer.py", line 1273, in create
self.store_sequences_for_hmm_hits()
File "/workdir/lab_envs/anvio/lib/python3.6/site-packages/anvio/summarizer.py", line 1533, in store_sequences_for_hmm_hits
hmm_sequences_dict = s.get_sequences_dict_for_hmm_hits_in_splits({self.bin_id: self.split_names})
File "/workdir/lab_envs/anvio/lib/python3.6/site-packages/anvio/hmmops.py", line 336, in get_sequences_dict_for_hmm_hits_in_splits
hmm_hit = self.hmm_hits[split_entry['hmm_hit_entry_id']]
KeyError: 17702
|
KeyError
|
def is_amino_acid_functionally_conserved(amino_acid_residue_1, amino_acid_residue_2):
    """Checks if two amino acid residues are part of the same biochemical property group"""
    property_group = constants.amino_acid_property_group[amino_acid_residue_1]
    group_members = constants.conserved_amino_acid_groups[property_group]

    # a residue may have no conserved group at all, hence the guard
    if group_members and amino_acid_residue_2 in group_members:
        return True

    if property_group == "Polar and Nonpolar":
        # residues of this group belong to more than one biochemical group,
        # so additional membership tests are required
        groups = constants.conserved_amino_acid_groups
        if amino_acid_residue_1 == "H":
            if (
                amino_acid_residue_2 in groups["Nonpolar"]
                or amino_acid_residue_2 in groups["Bases"]
            ):
                return True
        if amino_acid_residue_1 == "Y" and amino_acid_residue_2 in groups["Aromatic"]:
            return True

    return False
|
def is_amino_acid_functionally_conserved(amino_acid_residue_1, amino_acid_residue_2):
    """Checks if two amino acid residues are part of the same biochemical property group"""
    group = constants.amino_acid_property_group[amino_acid_residue_1]
    conserved_group = constants.conserved_amino_acid_groups[group]
    # `conserved_amino_acid_groups` may map a group to None; guard before the
    # membership test (fixes `TypeError: argument of type 'NoneType' is not
    # iterable` raised during homogeneity index computation)
    if conserved_group and amino_acid_residue_2 in conserved_group:
        return True
    if (
        group == "Polar and Nonpolar"
    ):  # they fall in more than one group, multiple tests needed
        if amino_acid_residue_1 == "H" and (
            amino_acid_residue_2 in constants.conserved_amino_acid_groups["Nonpolar"]
            or amino_acid_residue_2 in constants.conserved_amino_acid_groups["Bases"]
        ):
            return True
        if amino_acid_residue_1 == "Y" and (
            amino_acid_residue_2 in constants.conserved_amino_acid_groups["Aromatic"]
        ):
            return True
    return False
|
https://github.com/merenlab/anvio/issues/1349
|
* Gene clusters are initialized for all 121 gene clusters in the database.
[06 Feb 20 18:17:25 Computing gene cluster homogeneity indices] Initializing 1 threads... ETA: ∞:∞:∞
WARNING
==============================================================
Homogeneity indices computation for gene cluster GC_00000101 failed. This can
happen due to one of three reasons: (1) this gene cluster is named incorrectly,
does not exist in the database, or is formatted into the input dictionary
incorrectly, (2) there is an alignment mistake in the gene cluster, and not all
genes are aligned to be the same lenght; or (3) the homogeneity calculator was
initialized incorrectly. As you can see, this is a rare circumstance, and anvi'o
will set this gene cluster's homogeneity indices to `-1` so things can move on,
but we highly recommend you to take a look at your data to make sure you are
satisfied with your analysis.
Process Process-4:
Traceback (most recent call last):
File "/usr/local/projdata/0718/projects/jon/conda_envs/github/anvio/anvio/dbops.py", line 1165, in homogeneity_worker
funct_index, geo_index, combined_index = homogeneity_calculator.get_homogeneity_dicts(gene_cluster)
File "/usr/local/projdata/0718/projects/jon/conda_envs/github/anvio/anvio/homogeneityindex.py", line 174, in get_homogeneity_dicts
fun = self.compute_functional_index(cluster_sequences)
File "/usr/local/projdata/0718/projects/jon/conda_envs/github/anvio/anvio/homogeneityindex.py", line 61, in compute_functional_index
elif utils.is_amino_acid_functionally_conserved(amino_acid_residue_1,amino_acid_residue_2):
File "/usr/local/projdata/0718/projects/jon/conda_envs/github/anvio/anvio/utils.py", line 1564, in is_amino_acid_functionally_conserved
if amino_acid_residue_2 in conserved_group:
TypeError: argument of type 'NoneType' is not iterable
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/projdata/0718/projects/jon/conda_envs/envs/anvio-master/lib/python3.6/multiprocessing/process.py", line 258, in _bootstrap
self.run()
File "/usr/local/projdata/0718/projects/jon/conda_envs/envs/anvio-master/lib/python3.6/multiprocessing/process.py", line 93, in run
self._target(*self._args, **self._kwargs)
File "/usr/local/projdata/0718/projects/jon/conda_envs/github/anvio/anvio/dbops.py", line 1176, in homogeneity_worker
combined_index[gene_cluster_name] = -1
UnboundLocalError: local variable 'combined_index' referenced before assignment
|
TypeError
|
def migrate(db_path):
    """Upgrade a pan database from `current_version` to `next_version`."""
    if db_path is None:
        raise ConfigError("No database path is given.")

    # make sure someone is not being funny
    utils.is_pan_db(db_path)

    # make sure the version is accurate
    pan_db = db.DB(db_path, None, ignore_version=True)
    if str(pan_db.get_version()) != current_version:
        raise ConfigError(
            "Version of this pan database is not %s (hence, this script cannot really do anything)."
            % current_version
        )

    progress.new("Trying to upgrade the pan database")
    progress.update("...")

    # the table may already exist; that is fine
    try:
        pan_db.create_table(
            item_orders_table_name, item_orders_table_structure, item_orders_table_types
        )
    except:
        pass

    # move clustering data into the new table
    for clustering_name, clustering in pan_db.get_table_as_dict("clusterings").items():
        pan_db._exec(
            """INSERT INTO %s VALUES (?,?,?)""" % item_orders_table_name,
            (clustering_name, "newick", clustering["newick"]),
        )

    # update keys (best-effort: a key may be missing in some databases)
    key_renames = (
        ("pc_min_occurrence", "gene_cluster_min_occurrence"),
        ("num_protein_clusters", "num_gene_clusters"),
        ("num_genes_in_protein_clusters", "num_genes_in_gene_clusters"),
        ("available_clusterings", "available_item_orders"),
        ("PCs_clustered", "PCs_ordered"),
        ("default_clustering", "default_item_order"),
    )
    for old_key, new_key in key_renames:
        try:
            pan_db.set_meta_value(new_key, pan_db.get_meta_value(old_key))
        except:
            pass

    # remove stuff that is no longer relevant (again best-effort)
    try:
        pan_db._exec("DROP TABLE clusterings;")
        pan_db.remove_meta_key_value_pair("available_clusterings")
        pan_db.remove_meta_key_value_pair("PCs_clustered")
        pan_db.remove_meta_key_value_pair("default_clustering")
        pan_db.remove_meta_key_value_pair("num_protein_clusters")
        pan_db.remove_meta_key_value_pair("num_genes_in_protein_clusters")
        pan_db.remove_meta_key_value_pair("pc_min_occurrence")
    except:
        pass

    # commit
    try:
        pan_db._exec("COMMIT")
    except:
        pass

    # cleanup
    try:
        pan_db._exec("vacuum")
    except:
        pass

    # set the version
    pan_db.remove_meta_key_value_pair("version")
    pan_db.set_version(next_version)

    # bye
    pan_db.disconnect()
    progress.end()
    run.info_single(
        "Your pan db is now %s." % next_version, nl_after=1, nl_before=1, mc="green"
    )
|
def migrate(db_path):
    """Upgrade a pan database from `current_version` to `next_version`.

    Moves clustering data from the old `clusterings` table into the item
    orders table, renames several self-table keys, drops obsolete
    tables/keys, and bumps the version.
    """
    if db_path is None:
        raise ConfigError("No database path is given.")
    # make sure someone is not being funny
    utils.is_pan_db(db_path)
    # make sure the version is accurate
    pan_db = db.DB(db_path, None, ignore_version=True)
    if str(pan_db.get_version()) != current_version:
        raise ConfigError(
            "Version of this pan database is not %s (hence, this script cannot really do anything)."
            % current_version
        )
    progress.new("Trying to upgrade the pan database")
    progress.update("...")
    # NOTE: use the table definitions frozen in this migration script rather
    # than the live ones in `anvio.tables` (`t.*`): the current schema can
    # gain columns over time (e.g. `item_orders` grew a 4th column), which
    # makes the 3-value INSERT below fail with sqlite3.OperationalError.
    try:
        pan_db.create_table(
            item_orders_table_name,
            item_orders_table_structure,
            item_orders_table_types,
        )
    except:
        pass
    clusterings = pan_db.get_table_as_dict("clusterings")
    # move clustering data into the new table
    for clustering in clusterings:
        newick = clusterings[clustering]["newick"]
        pan_db._exec(
            """INSERT INTO %s VALUES (?,?,?)""" % item_orders_table_name,
            tuple([clustering, "newick", newick]),
        )
    # update keys (best-effort: some keys may be absent)
    for old_key, new_key in [
        ("pc_min_occurrence", "gene_cluster_min_occurrence"),
        ("num_protein_clusters", "num_gene_clusters"),
        ("num_genes_in_protein_clusters", "num_genes_in_gene_clusters"),
        ("available_clusterings", "available_item_orders"),
        ("PCs_clustered", "PCs_ordered"),
        ("default_clustering", "default_item_order"),
    ]:
        try:
            pan_db.set_meta_value(new_key, pan_db.get_meta_value(old_key))
        except:
            pass
    # remove stuff that is no longer relevant (best-effort)
    try:
        pan_db._exec("DROP TABLE clusterings;")
        pan_db.remove_meta_key_value_pair("available_clusterings")
        pan_db.remove_meta_key_value_pair("PCs_clustered")
        pan_db.remove_meta_key_value_pair("default_clustering")
        pan_db.remove_meta_key_value_pair("num_protein_clusters")
        pan_db.remove_meta_key_value_pair("num_genes_in_protein_clusters")
        pan_db.remove_meta_key_value_pair("pc_min_occurrence")
    except:
        pass
    # commit
    try:
        pan_db._exec("COMMIT")
    except:
        pass
    # cleanup
    try:
        pan_db._exec("vacuum")
    except:
        pass
    # set the version
    pan_db.remove_meta_key_value_pair("version")
    pan_db.set_version(next_version)
    # bye
    pan_db.disconnect()
    progress.end()
    run.info_single(
        "Your pan db is now %s." % next_version, nl_after=1, nl_before=1, mc="green"
    )
|
https://github.com/merenlab/anvio/issues/971
|
wifi-roaming-128-4-182-83:anvio_update sean$ anvi-migrate-db Zeta_Pangenome-PAN.db
/usr/local/Cellar/anvio/5.1/libexec/lib/python3.7/site-packages/matplotlib/__init__.py:1067: UserWarning: Duplicate key in file "/Users/sean/.matplotlib/matplotlibrc", line #2
(fname, cnt))
Database Path ................................: Zeta_Pangenome-PAN.db
Detected Type ................................: pan
Current Version ..............................: 5
Target Version ...............................: 12
[12 Sep 18 12:21:23 Trying to upgrade the pan database] ... Traceback (most recent call last):
File "/usr/local/bin/anvi-migrate-db", line 119, in <module>
Migrater(args_for_single_db).process()
File "/usr/local/bin/anvi-migrate-db", line 93, in process
migration_scripts[self.db_type][script_name].migrate(self.db_path)
File "/usr/local/Cellar/anvio/5.1/libexec/lib/python3.7/site-packages/anvio/migrations/pan/v5_to_v6.py", line 47, in migrate
pan_db._exec('''INSERT INTO %s VALUES (?,?,?)''' % t.item_orders_table_name, tuple([clustering, 'newick', newick]))
File "/usr/local/Cellar/anvio/5.1/libexec/lib/python3.7/site-packages/anvio/db.py", line 174, in _exec
ret_val = self.cursor.execute(sql_query, value)
sqlite3.OperationalError: table item_orders has 4 columns but 3 values were supplied
|
sqlite3.OperationalError
|
def __init__(self, args):
    """Resolve the target database, data group, and known data keys.

    Expects `args` to carry one of `pan_or_profile_db`, `profile_db`, or
    `pan_db`, and requires `self.table_name` to be set by a subclass
    before this constructor runs.
    """
    get_arg = lambda name: args.__dict__[name] if name in args.__dict__ else None

    self.db_path = get_arg("pan_or_profile_db") or get_arg("profile_db") or get_arg("pan_db")
    self.just_do_it = get_arg("just_do_it")
    self.target_data_group_set_by_user = get_arg("target_data_group") or None
    self.target_data_group = self.target_data_group_set_by_user or "default"

    if not self.db_path:
        raise ConfigError(
            "The AdditionalAndOrderDataBaseClass is inherited with an args object that did not\
                contain any database path :/ Even though any of the following would\
                have worked: `pan_or_profile_db`, `profile_db`, `pan_db` :("
        )

    if not self.table_name:
        raise ConfigError(
            "The AdditionalAndOrderDataBaseClass does not know anything about the table it should\
                be working with."
        )

    utils.is_pan_or_profile_db(self.db_path)
    self.db_type = utils.get_db_type(self.db_path)
    self.db_version = utils.get_required_version_for_db(self.db_path)

    # learn which data keys already live in the target table
    database = db.DB(self.db_path, self.db_version)
    self.additional_data_keys = database.get_single_column_from_table(
        self.table_name, "data_key"
    )
    database.disconnect()

    Table.__init__(self, self.db_path, self.db_version, self.run, self.progress)

    # null substitutes for missing values, keyed by predicted data type
    self.nulls_per_type = {
        "str": "",
        "int": 0,
        "float": 0,
        "stackedbar": 0,
        "unknown": None,
    }
|
def __init__(self, args):
    """Resolve the target database, data group, and known data keys.

    Expects `args` to carry one of `pan_or_profile_db`, `profile_db`, or
    `pan_db`, and requires `self.table_name` to be set by a subclass
    before this constructor runs.
    """
    A = lambda x: args.__dict__[x] if x in args.__dict__ else None
    self.db_path = A("pan_or_profile_db") or A("profile_db") or A("pan_db")
    self.just_do_it = A("just_do_it")
    self.target_data_group_set_by_user = A("target_data_group") or None
    self.target_data_group = self.target_data_group_set_by_user or "default"
    if not self.db_path:
        raise ConfigError(
            "The AdditionalAndOrderDataBaseClass is inherited with an args object that did not\
                contain any database path :/ Even though any of the following would\
                have worked: `pan_or_profile_db`, `profile_db`, `pan_db` :("
        )
    if not self.table_name:
        raise ConfigError(
            "The AdditionalAndOrderDataBaseClass does not know anything about the table it should\
                be working with."
        )
    utils.is_pan_or_profile_db(self.db_path)
    self.db_type = utils.get_db_type(self.db_path)
    self.db_version = utils.get_required_version_for_db(self.db_path)
    database = db.DB(self.db_path, self.db_version)
    self.additional_data_keys = database.get_single_column_from_table(
        self.table_name, "data_key"
    )
    database.disconnect()
    Table.__init__(self, self.db_path, self.db_version, self.run, self.progress)
    # null substitutes for missing values, keyed by predicted data type.
    # `stackedbar` must be numeric (0, not None): stacked-bar values are fed
    # into `float()` when orders are computed, and None crashes with
    # `TypeError: float() argument must be a string or a number`.
    self.nulls_per_type = {
        "str": "",
        "int": 0,
        "float": 0,
        "stackedbar": 0,
        "unknown": None,
    }
|
https://github.com/merenlab/anvio/issues/972
|
Traceback (most recent call last):
File "/Users/evan/Software/anvio/bin/anvi-interactive", line 100, in <module>
d = interactive.Interactive(args)
File "/Users/evan/Software/anvio/anvio/interactive.py", line 133, in __init__
self.layers_additional_data_keys[group_name], self.layers_additional_data_dict[group_name]) if a_db_is_found else {}
File "/Users/evan/Software/anvio/anvio/tables/miscdata.py", line 355, in update_orders_dict_using_additional_data_dict
sum_stackbar_items[stackbar_name][layer] += float(additional_data_dict[layer][data_key])
TypeError: float() argument must be a string or a number, not 'NoneType'
|
TypeError
|
def update_orders_dict_using_additional_data_dict(
    self, order_data_dict, additional_data_keys, additional_data_dict
):
    """Extend `order_data_dict` with layer orders derived from additional data.

    For every data key, layers are sorted by value and two 'basic' orders
    (ascending and descending) are appended. Keys containing '!' are treated
    as stacked-bar items and are normalized by the per-layer sum of all items
    belonging to the same bar before sorting.
    """
    if order_data_dict:
        self.data_dict_sanity_check(order_data_dict, treat_data_dict_as="layer_orders")
    # FIXME: here we need to check whether the two dictionaries are in fact 'compatible' with respect to sample names
    # they describe.
    self.data_dict_sanity_check(
        additional_data_dict,
        data_keys_list=additional_data_keys,
        treat_data_dict_as="layers",
    )
    # first pass: accumulate, per stacked bar and per layer, the sum of all
    # item values (used below to normalize each item to a fraction)
    sum_stackbar_items = {}
    for data_key in additional_data_keys:
        if "!" in data_key:
            stackbar_name = data_key.split("!")[0]
            if stackbar_name not in sum_stackbar_items:
                sum_stackbar_items[stackbar_name] = {}
            for layer in additional_data_dict:
                if layer not in sum_stackbar_items[stackbar_name]:
                    sum_stackbar_items[stackbar_name][layer] = 0.0
                # values may be missing (None/empty); only accumulate real ones
                if additional_data_dict[layer][data_key]:
                    sum_stackbar_items[stackbar_name][layer] += float(
                        additional_data_dict[layer][data_key]
                    )
    # second pass: build (sortable_value, layer) tuples per data key and
    # register both sort directions in `order_data_dict`
    for data_key in additional_data_keys:
        if "!" in data_key:
            predicted_key_type = "stackedbar"
            stacked_bar_name, item_name = data_key.split("!")
            data_key_name = "%s [%s]" % (stacked_bar_name, item_name)
        else:
            type_class = utils.get_predicted_type_of_items_in_a_dict(
                additional_data_dict, data_key
            )
            predicted_key_type = type_class.__name__ if type_class else "unknown"
            data_key_name = data_key
        if predicted_key_type == "stackedbar":
            stackbar_name = data_key.split("!")[0]
            layer_name_layer_data_tuples = []
            for layer in additional_data_dict:
                if additional_data_dict[layer][data_key]:
                    # normalize to a fraction of the bar's total (0 when the
                    # whole bar sums to 0 to avoid a division by zero)
                    if sum_stackbar_items[stackbar_name][layer] == 0:
                        layer_name_layer_data_tuples.append((0, layer))
                    else:
                        layer_name_layer_data_tuples.append(
                            (
                                (
                                    float(additional_data_dict[layer][data_key])
                                    / (1.0 * sum_stackbar_items[stackbar_name][layer])
                                ),
                                layer,
                            )
                        )
                else:
                    layer_name_layer_data_tuples.append(
                        (self.nulls_per_type[predicted_key_type], layer)
                    )
        else:
            # non-stackedbar keys: use the raw value, substituting the
            # type-appropriate null for missing entries
            layer_name_layer_data_tuples = [
                (
                    additional_data_dict[layer][data_key]
                    if additional_data_dict[layer][data_key]
                    else self.nulls_per_type[predicted_key_type],
                    layer,
                )
                for layer in additional_data_dict
            ]
        order_data_dict[">> " + data_key_name] = {
            "newick": None,
            "basic": ",".join([t[1] for t in sorted(layer_name_layer_data_tuples)]),
        }
        order_data_dict[">> " + data_key_name + " (reverse)"] = {
            "newick": None,
            "basic": ",".join(
                [t[1] for t in sorted(layer_name_layer_data_tuples, reverse=True)]
            ),
        }
    return order_data_dict
|
def update_orders_dict_using_additional_data_dict(
    self, order_data_dict, additional_data_keys, additional_data_dict
):
    """Extend `order_data_dict` with layer orders derived from additional data.

    For every data key, layers are sorted by value and two 'basic' orders
    (ascending and descending) are appended. Keys containing '!' are treated
    as stacked-bar items and are normalized by the per-layer sum of all items
    belonging to the same bar before sorting.
    """
    if order_data_dict:
        self.data_dict_sanity_check(order_data_dict, treat_data_dict_as="layer_orders")
    # FIXME: here we need to check whether the two dictionaries are in fact 'compatible' with respect to sample names
    # they describe.
    self.data_dict_sanity_check(
        additional_data_dict,
        data_keys_list=additional_data_keys,
        treat_data_dict_as="layers",
    )
    sum_stackbar_items = {}
    for data_key in additional_data_keys:
        if "!" in data_key:
            stackbar_name = data_key.split("!")[0]
            if stackbar_name not in sum_stackbar_items:
                sum_stackbar_items[stackbar_name] = {}
            for layer in additional_data_dict:
                if layer not in sum_stackbar_items[stackbar_name]:
                    sum_stackbar_items[stackbar_name][layer] = 0.0
                # values may be missing (None); only accumulate real values —
                # `float(None)` raises `TypeError: float() argument must be a
                # string or a number`
                if additional_data_dict[layer][data_key]:
                    sum_stackbar_items[stackbar_name][layer] += float(
                        additional_data_dict[layer][data_key]
                    )
    for data_key in additional_data_keys:
        if "!" in data_key:
            predicted_key_type = "stackedbar"
            stacked_bar_name, item_name = data_key.split("!")
            data_key_name = "%s [%s]" % (stacked_bar_name, item_name)
        else:
            type_class = utils.get_predicted_type_of_items_in_a_dict(
                additional_data_dict, data_key
            )
            predicted_key_type = type_class.__name__ if type_class else "unknown"
            data_key_name = data_key
        if predicted_key_type == "stackedbar":
            stackbar_name = data_key.split("!")[0]
            layer_name_layer_data_tuples = []
            for layer in additional_data_dict:
                if additional_data_dict[layer][data_key]:
                    # normalize to a fraction of the bar's total (0 when the
                    # whole bar sums to 0 to avoid a division by zero)
                    if sum_stackbar_items[stackbar_name][layer] == 0:
                        layer_name_layer_data_tuples.append((0, layer))
                    else:
                        layer_name_layer_data_tuples.append(
                            (
                                (
                                    float(additional_data_dict[layer][data_key])
                                    / (1.0 * sum_stackbar_items[stackbar_name][layer])
                                ),
                                layer,
                            )
                        )
                else:
                    layer_name_layer_data_tuples.append(
                        (self.nulls_per_type[predicted_key_type], layer)
                    )
        else:
            layer_name_layer_data_tuples = [
                (
                    additional_data_dict[layer][data_key]
                    if additional_data_dict[layer][data_key]
                    else self.nulls_per_type[predicted_key_type],
                    layer,
                )
                for layer in additional_data_dict
            ]
        order_data_dict[">> " + data_key_name] = {
            "newick": None,
            "basic": ",".join([t[1] for t in sorted(layer_name_layer_data_tuples)]),
        }
        order_data_dict[">> " + data_key_name + " (reverse)"] = {
            "newick": None,
            "basic": ",".join(
                [t[1] for t in sorted(layer_name_layer_data_tuples, reverse=True)]
            ),
        }
    return order_data_dict
|
https://github.com/merenlab/anvio/issues/972
|
Traceback (most recent call last):
File "/Users/evan/Software/anvio/bin/anvi-interactive", line 100, in <module>
d = interactive.Interactive(args)
File "/Users/evan/Software/anvio/anvio/interactive.py", line 133, in __init__
self.layers_additional_data_keys[group_name], self.layers_additional_data_dict[group_name]) if a_db_is_found else {}
File "/Users/evan/Software/anvio/anvio/tables/miscdata.py", line 355, in update_orders_dict_using_additional_data_dict
sum_stackbar_items[stackbar_name][layer] += float(additional_data_dict[layer][data_key])
TypeError: float() argument must be a string or a number, not 'NoneType'
|
TypeError
|
def run_stdin(self, sequences_list, debug=False):
    """Align a list of (name, sequence) tuples with muscle via STDIN; return a dict.

    >>> from anvio.drivers.muscle import Muscle
    >>> m = Muscle()
    >>> m.run_stdin([('seq1', 'ATCATCATCGA'), ('seq2', 'ATCGAGTCGAT')])
    {u'seq1': u'ATCATCATCGA-', u'seq2': u'ATCG-AGTCGAT'}
    """
    temp_dir = filesnpaths.get_temp_directory_path()
    log_file_path = os.path.join(temp_dir, "00_log.txt")

    self.run.info(
        "Running %s" % self.program_name,
        "%d seqeunces will be aligned" % len(sequences_list),
    )
    self.run.info("Log file path", log_file_path)

    # serialize the input as FASTA and pipe it to muscle over STDIN
    fasta_records = [">%s\n%s\n" % (pair[0], pair[1]) for pair in sequences_list]
    sequences_data = "".join(fasta_records)
    output = utils.run_command_STDIN(
        [self.program_name, "-quiet"], log_file_path, sequences_data
    )

    # a successful run produces non-empty FASTA output
    if len(output) == 0 or output[0] != ">":
        with open(log_file_path, "a") as log_file:
            log_file.write(
                "# THIS IS THE OUTPUT YOU ARE LOOKING FOR:\n\n%s\n" % (output)
            )

        raise ConfigError(
            "Drivers::Muscle: Something went wrong with this run :/ The output does not\
                look alright. You can find the output in this log file: %s"
            % (log_file_path)
        )

    # parse the aligned FASTA back into a dict; the trailing ">" sentinel
    # flushes the final record
    alignments = {}
    header, body = None, None
    for line in [ln for ln in output.split("\n") if len(ln)] + [">"]:
        if line.startswith(">"):
            if header:
                alignments[header[1:]] = body
            header, body = line, None
        elif not body:
            body = line
        else:
            body += line

    if not debug:
        shutil.rmtree(temp_dir)

    return alignments
|
def run_stdin(self, sequences_list, debug=False):
    """Takes a list of tuples for sequences, performs MSA using muscle, returns a dict.
    >>> from anvio.drivers.muscle import Muscle
    >>> m = Muscle()
    >>> m.run_stdin([('seq1', 'ATCATCATCGA'), ('seq2', 'ATCGAGTCGAT')])
    {u'seq1': u'ATCATCATCGA-', u'seq2': u'ATCG-AGTCGAT'}
    """
    tmp_dir = filesnpaths.get_temp_directory_path()
    log_file_path = os.path.join(tmp_dir, "00_log.txt")
    self.run.info(
        "Running %s" % self.program_name,
        "%d seqeunces will be aligned" % len(sequences_list),
    )
    self.run.info("Log file path", log_file_path)
    sequences_data = "".join([">%s\n%s\n" % (t[0], t[1]) for t in sequences_list])
    cmd_line = [self.program_name, "-quiet"]
    output = utils.run_command_STDIN(cmd_line, log_file_path, sequences_data)
    # a failed run may produce *empty* output; check the length before
    # indexing, otherwise `output[0]` raises IndexError on an empty string
    if len(output) == 0 or output[0] != ">":
        with open(log_file_path, "a") as log_file:
            log_file.write(
                "# THIS IS THE OUTPUT YOU ARE LOOKING FOR:\n\n%s\n" % (output)
            )
        raise ConfigError(
            "Drivers::Muscle: Something went wrong with this run :/ The output does not\
                look alright. You can find the output in this log file: %s"
            % (log_file_path)
        )
    alignments = {}
    # parse the output, and fill alignments
    defline, seq = None, None
    for line in [o for o in output.split("\n") if len(o)] + [">"]:
        if line.startswith(">"):
            if defline:
                alignments[defline[1:]] = seq
            defline, seq = line, None
        else:
            if not seq:
                seq = line
            else:
                seq += line
    if not debug:
        shutil.rmtree(tmp_dir)
    return alignments
|
https://github.com/merenlab/anvio/issues/735
|
[05 Feb 18 00:07:31 Aligning amino acid sequences for genes in gene cluste (...)Process Process-19:
Traceback (most recent call last):
File "/bioware/python-3.5.1-201606221454/lib/python3.5/multiprocessing/process.py", line 254, in _bootstrap
self.run()
File "/bioware/python-3.5.1-201606221454/lib/python3.5/multiprocessing/process.py", line 93, in run
self._target(*self._args, **self._kwargs)
File "/groups/merenlab/virtual-envs/anvio-master/anvio/anvio/panops.py", line 784, in alignment_worker
alignments = aligner(run=r).run_stdin(gene_sequences_in_gene_cluster)
File "/groups/merenlab/virtual-envs/anvio-master/anvio/anvio/drivers/muscle.py", line 64, in run_stdin
if not output[0] == '>':
IndexError: string index out of range
Process Process-31:
Traceback (most recent call last):
File "/bioware/python-3.5.1-201606221454/lib/python3.5/multiprocessing/process.py", line 254, in _bootstrap
self.run()
File "/bioware/python-3.5.1-201606221454/lib/python3.5/multiprocessing/process.py", line 93, in run
self._target(*self._args, **self._kwargs)
File "/groups/merenlab/virtual-envs/anvio-master/anvio/anvio/panops.py", line 784, in alignment_worker
alignments = aligner(run=r).run_stdin(gene_sequences_in_gene_cluster)
File "/groups/merenlab/virtual-envs/anvio-master/anvio/anvio/drivers/muscle.py", line 64, in run_stdin
if not output[0] == '>':
IndexError: string index out of range
[05 Feb 18 00:07:31 Aligning amino acid sequences for genes in gene cluste (...)
|
IndexError
|
def run_stdin(self, sequences_list, debug=False):
    """Run muscle on sequences delivered via STDIN and collect the alignment.

    >>> from anvio.drivers.muscle import Muscle
    >>> m = Muscle()
    >>> m.run_stdin([('seq1', 'ATCATCATCGA'), ('seq2', 'ATCGAGTCGAT')])
    {u'seq1': u'ATCATCATCGA-', u'seq2': u'ATCG-AGTCGAT'}
    """
    work_dir = filesnpaths.get_temp_directory_path()
    log_file_path = os.path.join(work_dir, "00_log.txt")

    self.run.info(
        "Running %s" % self.program_name,
        "%d seqeunces will be aligned" % len(sequences_list),
    )
    self.run.info("Log file path", log_file_path)

    sequences_data = "".join(
        [">%s\n%s\n" % (entry[0], entry[1]) for entry in sequences_list]
    )
    output = utils.run_command_STDIN(
        [self.program_name, "-quiet"], log_file_path, sequences_data
    )

    # an empty string, or anything that is not FASTA, signals a failed run
    if not output.startswith(">"):
        with open(log_file_path, "a") as log_file:
            log_file.write(
                "# THIS IS THE OUTPUT YOU ARE LOOKING FOR:\n\n%s\n" % (output)
            )

        raise ConfigError(
            "Drivers::Muscle: Something went wrong with this run :/ The output does not\
                look alright. You can find the output in this log file: %s"
            % (log_file_path)
        )

    # walk the FASTA lines; appending a bare ">" makes sure the last
    # record is flushed into the dict
    alignments = {}
    current_name, current_seq = None, None
    for item in [piece for piece in output.split("\n") if len(piece)] + [">"]:
        if item.startswith(">"):
            if current_name:
                alignments[current_name[1:]] = current_seq
            current_name, current_seq = item, None
        else:
            current_seq = item if not current_seq else current_seq + item

    if not debug:
        shutil.rmtree(work_dir)

    return alignments
|
def run_stdin(self, sequences_list, debug=False):
    """Takes a list of tuples for sequences, performs MSA using muscle, returns a dict.
    >>> from anvio.drivers.muscle import Muscle
    >>> m = Muscle()
    >>> m.run_stdin([('seq1', 'ATCATCATCGA'), ('seq2', 'ATCGAGTCGAT')])
    {u'seq1': u'ATCATCATCGA-', u'seq2': u'ATCG-AGTCGAT'}
    """
    tmp_dir = filesnpaths.get_temp_directory_path()
    log_file_path = os.path.join(tmp_dir, "00_log.txt")
    self.run.info(
        "Running %s" % self.program_name,
        "%d seqeunces will be aligned" % len(sequences_list),
    )
    self.run.info("Log file path", log_file_path)
    # build a FASTA string from the (name, sequence) tuples and pipe it to muscle via STDIN
    sequences_data = "".join([">%s\n%s\n" % (t[0], t[1]) for t in sequences_list])
    cmd_line = [self.program_name, "-quiet"]
    output = utils.run_command_STDIN(cmd_line, log_file_path, sequences_data)
    # empty output, or output that does not start with ">", means the run failed;
    # the raw output is appended to the log file for inspection
    if len(output) == 0 or output[0] != ">":
        with open(log_file_path, "a") as log_file:
            log_file.write(
                "# THIS IS THE OUTPUT YOU ARE LOOKING FOR:\n\n%s\n" % (output)
            )
        raise ConfigError(
            "Drivers::Muscle: Something went wrong with this run :/ The output does not\
                look alright. You can find the output in this log file: %s"
            % (log_file_path)
        )
    alignments = {}
    # parse the output, and fill alignments
    # (the trailing ">" sentinel flushes the final record into the dict)
    defline, seq = None, None
    for line in [o for o in output.split("\n") if len(o)] + [">"]:
        if line.startswith(">"):
            if defline:
                alignments[defline[1:]] = seq
            defline, seq = line, None
        else:
            # first body line starts the sequence; subsequent lines are appended
            if not seq:
                seq = line
            else:
                seq += line
    if not debug:
        shutil.rmtree(tmp_dir)
    return alignments
|
https://github.com/merenlab/anvio/issues/735
|
[05 Feb 18 00:07:31 Aligning amino acid sequences for genes in gene cluste (...)Process Process-19:
Traceback (most recent call last):
File "/bioware/python-3.5.1-201606221454/lib/python3.5/multiprocessing/process.py", line 254, in _bootstrap
self.run()
File "/bioware/python-3.5.1-201606221454/lib/python3.5/multiprocessing/process.py", line 93, in run
self._target(*self._args, **self._kwargs)
File "/groups/merenlab/virtual-envs/anvio-master/anvio/anvio/panops.py", line 784, in alignment_worker
alignments = aligner(run=r).run_stdin(gene_sequences_in_gene_cluster)
File "/groups/merenlab/virtual-envs/anvio-master/anvio/anvio/drivers/muscle.py", line 64, in run_stdin
if not output[0] == '>':
IndexError: string index out of range
Process Process-31:
Traceback (most recent call last):
File "/bioware/python-3.5.1-201606221454/lib/python3.5/multiprocessing/process.py", line 254, in _bootstrap
self.run()
File "/bioware/python-3.5.1-201606221454/lib/python3.5/multiprocessing/process.py", line 93, in run
self._target(*self._args, **self._kwargs)
File "/groups/merenlab/virtual-envs/anvio-master/anvio/anvio/panops.py", line 784, in alignment_worker
alignments = aligner(run=r).run_stdin(gene_sequences_in_gene_cluster)
File "/groups/merenlab/virtual-envs/anvio-master/anvio/anvio/drivers/muscle.py", line 64, in run_stdin
if not output[0] == '>':
IndexError: string index out of range
[05 Feb 18 00:07:31 Aligning amino acid sequences for genes in gene cluste (...)
|
IndexError
|
def store_hits_into_contigs_db(self):
    """Store COG search hits as gene function annotations in the contigs database.

    Translates the NCBI protein hits in `self.hits` into COG_FUNCTION and
    COG_CATEGORY entries and writes them via dbops.TableForGeneFunctions.
    Warns about hits to COG IDs missing from the raw data, and about protein
    IDs present in the NCBI FASTA but absent from the NCBI CSV.

    Raises:
        ConfigError: if `self.hits` is empty (i.e., search was never run).
    """
    if not self.hits:
        raise ConfigError(
            "COGs class has no hits to process. Did you forget to call search?"
        )
    cogs_data = COGsData(self.args)
    cogs_data.init_p_id_to_cog_id_dict()
    functions_dict = {}
    self.__entry_id = 0
    def add_entry(gene_callers_id, source, accession, function, e_value):
        # helper: add one annotation row keyed by an auto-incremented entry id
        functions_dict[self.__entry_id] = {
            "gene_callers_id": int(gene_callers_id),
            "source": source,
            "accession": accession,
            "function": function,
            "e_value": float(e_value),
        }
        self.__entry_id += 1
    # let's keep track of hits that match to missing COGs
    hits_for_missing_cogs = 0
    missing_cogs_found = set([])
    # protein IDs seen in the NCBI FASTA but not in the NCBI CSV. BUGFIX: this
    # list must be initialized *before* the loop; initializing it inside the
    # loop reset it on every gene, so only the last gene's misses were ever
    # reported (and the warning count below was wrong).
    in_proteins_FASTA_not_in_cogs_CSV = []
    for gene_callers_id in self.hits:
        ncbi_protein_id = self.hits[gene_callers_id]["hit"]
        if ncbi_protein_id not in cogs_data.p_id_to_cog_id:
            in_proteins_FASTA_not_in_cogs_CSV.append(
                (ncbi_protein_id, gene_callers_id),
            )
        else:
            COG_ids = cogs_data.p_id_to_cog_id[ncbi_protein_id]
            annotations = []
            categories = set([])
            for COG_id in COG_ids:
                # is missing?
                if COG_id in cogs_data.missing_cogs:
                    missing_cogs_found.add(COG_id)
                    hits_for_missing_cogs += 1
                    continue
                # resolve categories
                for category in cogs_data.cogs[COG_id]["categories"]:
                    categories.add(category)
                # append annotation
                annotations.append(cogs_data.cogs[COG_id]["annotation"])
            # multiple COG ids/categories are joined with '!!!' so downstream
            # parsers can split them apart again
            add_entry(
                gene_callers_id,
                "COG_FUNCTION",
                "!!!".join(COG_ids),
                "!!!".join(annotations),
                self.hits[gene_callers_id]["evalue"],
            )
            add_entry(
                gene_callers_id,
                "COG_CATEGORY",
                "!!!".join(categories),
                "!!!".join(categories),
                0.0,
            )
    # store hits in contigs db.
    gene_function_calls_table = dbops.TableForGeneFunctions(
        self.contigs_db_path, self.run, self.progress
    )
    gene_function_calls_table.create(functions_dict)
    if len(missing_cogs_found):
        self.run.warning(
            "Although your COGs are successfully added to the database, there were some COG IDs your genes hit\
             were among the ones that were not described in the raw data. Here is the list of %d COG IDs that\
             were hit %d times: %s."
            % (
                len(missing_cogs_found),
                hits_for_missing_cogs,
                ", ".join(missing_cogs_found),
            )
        )
    if len(in_proteins_FASTA_not_in_cogs_CSV):
        # so some of the hits represented in the FASTA file from the NCBI were not put in the
        # CSV file from NCBI to associate them with COGs
        report_output_file_path = filesnpaths.get_temp_file_path()
        report_output = open(report_output_file_path, "w")
        report_output.write("anvio_gene_callers_id\tNCBI_protein_id\n")
        for protein_id, gene_callers_id in in_proteins_FASTA_not_in_cogs_CSV:
            report_output.write("%s\t%s\n" % (gene_callers_id, protein_id))
        report_output.close()
        self.run.warning(
            "This is important. %s hits for your genes that appeared in the proteins FASTA file from the NCBI had protein\
             IDs that were not described in the CSV file from the NCBI that associates each protein ID with a COG function.\
             That's OK if you don't care. But if you would like to take a look, anvi'o stored a report\
             file for you at %s"
            % (len(in_proteins_FASTA_not_in_cogs_CSV), report_output_file_path)
        )
|
def store_hits_into_contigs_db(self):
    """Store COG search hits as gene function annotations in the contigs database.

    BUGFIX: the NCBI proteins FASTA can contain protein IDs that the NCBI CSV
    never maps to a COG; indexing `p_id_to_cog_id` unguarded raised
    KeyError for such hits. They are now skipped, counted, and reported.

    Raises:
        ConfigError: if `self.hits` is empty (i.e., search was never run).
    """
    if not self.hits:
        raise ConfigError(
            "COGs class has no hits to process. Did you forget to call search?"
        )
    cogs_data = COGsData(self.args)
    cogs_data.init_p_id_to_cog_id_dict()
    functions_dict = {}
    self.__entry_id = 0
    def add_entry(gene_callers_id, source, accession, function, e_value):
        # helper: add one annotation row keyed by an auto-incremented entry id
        functions_dict[self.__entry_id] = {
            "gene_callers_id": int(gene_callers_id),
            "source": source,
            "accession": accession,
            "function": function,
            "e_value": float(e_value),
        }
        self.__entry_id += 1
    # let's keep track of hits that match to missing COGs
    hits_for_missing_cogs = 0
    missing_cogs_found = set([])
    # protein IDs present in the FASTA but absent from the NCBI CSV mapping
    in_proteins_FASTA_not_in_cogs_CSV = []
    for gene_callers_id in self.hits:
        ncbi_protein_id = self.hits[gene_callers_id]["hit"]
        if ncbi_protein_id not in cogs_data.p_id_to_cog_id:
            # guard against KeyError: this protein ID has no COG association
            in_proteins_FASTA_not_in_cogs_CSV.append(
                (ncbi_protein_id, gene_callers_id),
            )
            continue
        COG_ids = cogs_data.p_id_to_cog_id[ncbi_protein_id]
        annotations = []
        categories = set([])
        for COG_id in COG_ids:
            # is missing?
            if COG_id in cogs_data.missing_cogs:
                missing_cogs_found.add(COG_id)
                hits_for_missing_cogs += 1
                continue
            # resolve categories
            for category in cogs_data.cogs[COG_id]["categories"]:
                categories.add(category)
            # append annotation
            annotations.append(cogs_data.cogs[COG_id]["annotation"])
        # multiple COG ids/categories are joined with '!!!' so downstream
        # parsers can split them apart again
        add_entry(
            gene_callers_id,
            "COG_FUNCTION",
            "!!!".join(COG_ids),
            "!!!".join(annotations),
            self.hits[gene_callers_id]["evalue"],
        )
        add_entry(
            gene_callers_id,
            "COG_CATEGORY",
            "!!!".join(categories),
            "!!!".join(categories),
            0.0,
        )
    # store hits in contigs db.
    gene_function_calls_table = dbops.TableForGeneFunctions(
        self.contigs_db_path, self.run, self.progress
    )
    gene_function_calls_table.create(functions_dict)
    if len(missing_cogs_found):
        self.run.warning(
            "Although your COGs are successfully added to the database, there were some COG IDs your genes hit\
             were among the ones that were not described in the raw data. Here is the list of %d COG IDs that\
             were hit %d times: %s."
            % (
                len(missing_cogs_found),
                hits_for_missing_cogs,
                ", ".join(missing_cogs_found),
            )
        )
    if len(in_proteins_FASTA_not_in_cogs_CSV):
        self.run.warning(
            "%d hits for your genes matched protein IDs from the NCBI proteins FASTA file that were\
             not described in the NCBI CSV file associating protein IDs with COG functions. These\
             hits were skipped." % len(in_proteins_FASTA_not_in_cogs_CSV)
        )
|
https://github.com/merenlab/anvio/issues/673
|
Traceback (most recent call last):
File "/Users/meren/github/anvio/bin/anvi-run-ncbi-cogs", line 49, in <module>
main(args)
File "/Users/meren/github/anvio/bin/anvi-run-ncbi-cogs", line 28, in main
cogs.process()
File "/Users/meren/github/anvio/anvio/cogs.py", line 121, in process
self.store_hits_into_contigs_db()
File "/Users/meren/github/anvio/anvio/cogs.py", line 153, in store_hits_into_contigs_db
COG_ids = cogs_data.p_id_to_cog_id[ncbi_protein_id]
KeyError: '375255595'
|
KeyError
|
def __init__(self, args):
    """Shared constructor for the additional-data / layer-order table classes.

    Resolves the target database path from the args namespace, validates it,
    and caches the data keys already present in the target table.
    """
    def _arg(key):
        # absent namespace attributes resolve to None
        return args.__dict__.get(key)

    self.db_path = _arg("pan_or_profile_db") or _arg("profile_db") or _arg("pan_db")
    self.just_do_it = _arg("just_do_it")

    if not self.db_path:
        raise ConfigError(
            "The AdditionalAndOrderDataBaseClass is inherited with an args object that did not\
                contain any database path :/ Even though any of the following would\
                have worked: `pan_or_profile_db`, `profile_db`, `pan_db` :("
        )
    if not self.table_name:
        raise ConfigError(
            "The AdditionalAndOrderDataBaseClass does not know anything about the table it should\
                be working with."
        )

    utils.is_pan_or_profile_db(self.db_path)
    self.db_type = utils.get_db_type(self.db_path)
    self.db_version = utils.get_required_version_for_db(self.db_path)

    # cache the data keys currently stored in the target table
    database = db.DB(self.db_path, self.db_version)
    self.additional_data_keys = database.get_single_column_from_table(
        self.table_name, "data_key"
    )
    database.disconnect()

    Table.__init__(self, self.db_path, self.db_version, self.run, self.progress)

    # sortable stand-ins for missing values, per predicted data type
    self.nulls_per_type = {
        "str": "",
        "int": 0,
        "float": 0,
        "stackedbar": None,
        "unknown": None,
    }
|
def __init__(self, args):
    """Shared constructor for the additional-data / layer-order table classes.

    Resolves the target database path from the args namespace, validates it,
    and caches the data keys already present in the target table. Also defines
    `self.nulls_per_type`, the sortable stand-ins consumers substitute for
    missing (None) values so ordering never compares None with real data.
    """
    A = lambda x: args.__dict__[x] if x in args.__dict__ else None
    self.db_path = A("pan_or_profile_db") or A("profile_db") or A("pan_db")
    self.just_do_it = A("just_do_it")
    if not self.db_path:
        raise ConfigError(
            "The AdditionalAndOrderDataBaseClass is inherited with an args object that did not\
                contain any database path :/ Even though any of the following would\
                have worked: `pan_or_profile_db`, `profile_db`, `pan_db` :("
        )
    if not self.table_name:
        raise ConfigError(
            "The AdditionalAndOrderDataBaseClass does not know anything about the table it should\
                be working with."
        )
    utils.is_pan_or_profile_db(self.db_path)
    self.db_type = utils.get_db_type(self.db_path)
    self.db_version = utils.get_required_version_for_db(self.db_path)
    database = db.DB(self.db_path, self.db_version)
    self.additional_data_keys = database.get_single_column_from_table(
        self.table_name, "data_key"
    )
    database.disconnect()
    Table.__init__(self, self.db_path, self.db_version, self.run, self.progress)
    # type-appropriate stand-ins for missing values; without these, downstream
    # consumers sorting layer data hit "'<' not supported between instances of
    # 'NoneType' and 'str'" on databases with missing entries
    self.nulls_per_type = {
        "str": "",
        "int": 0,
        "float": 0,
        "stackedbar": None,
        "unknown": None,
    }
|
https://github.com/merenlab/anvio/issues/707
|
Traceback (most recent call last):
File "/Users/alonshaiber/github/anvio/bin/anvi-display-pan", line 74, in <module>
d = interactive.Interactive(args)
File "/Users/alonshaiber/github/anvio/anvio/interactive.py", line 106, in __init__
self.layers_order_data_dict = TableForLayerOrders(self.args).get(self.layers_additional_data_keys, self.layers_additional_data_dict) if a_db_is_found else {}
File "/Users/alonshaiber/github/anvio/anvio/tables/miscdata.py", line 256, in get
return self.update_orders_dict_using_additional_data_dict(d, additional_data_keys, additional_data_dict)
File "/Users/alonshaiber/github/anvio/anvio/tables/miscdata.py", line 276, in update_orders_dict_using_additional_data_dict
order_data_dict['>> ' + data_key] = {'newick': None, 'basic': ','.join([t[1] for t in sorted(layer_name_layer_data_tuples)])}
TypeError: '<' not supported between instances of 'NoneType' and 'str
|
TypeError
|
def update_orders_dict_using_additional_data_dict(
    self, order_data_dict, additional_data_keys, additional_data_dict
):
    """Derive layer orders from additional data and add them to `order_data_dict`.

    For every non-stacked-bar data key, layers are sorted by their value for
    that key (None values are replaced with a type-appropriate null so sorting
    never compares None against real values). Both forward and reverse orders
    are stored as 'basic' comma-separated orders under '>> <key>' names.

    Returns:
        The updated `order_data_dict`.
    """
    if order_data_dict:
        self.data_dict_sanity_check(order_data_dict, treat_data_dict_as="layer_orders")
    self.data_dict_sanity_check(
        additional_data_dict,
        data_keys_list=additional_data_keys,
        treat_data_dict_as="layers",
    )
    # FIXME: here we need to check whether the two dictionaries are in fact 'compatible' with respect to sample names
    # they describe.
    for data_key in additional_data_keys:
        if "!" in data_key:
            # we don't order stacked bar charts
            continue
        # predict the type for proper assignment of 'null' values. NOTE: the
        # original second `if "!" in data_key` branch here was dead code --
        # stacked bar keys never reach this point (skipped above).
        type_class = utils.get_predicted_type_of_items_in_a_dict(
            additional_data_dict, data_key
        )
        predicted_key_type = type_class.__name__ if type_class else "unknown"
        layer_name_layer_data_tuples = [
            (
                additional_data_dict[layer][data_key]
                if additional_data_dict[layer][data_key]
                else self.nulls_per_type[predicted_key_type],
                layer,
            )
            for layer in additional_data_dict
        ]
        order_data_dict[">> " + data_key] = {
            "newick": None,
            "basic": ",".join([t[1] for t in sorted(layer_name_layer_data_tuples)]),
        }
        order_data_dict[">> " + data_key + " (reverse)"] = {
            "newick": None,
            "basic": ",".join(
                [t[1] for t in sorted(layer_name_layer_data_tuples, reverse=True)]
            ),
        }
    return order_data_dict
|
def update_orders_dict_using_additional_data_dict(
    self, order_data_dict, additional_data_keys, additional_data_dict
):
    """Derive layer orders from additional data and add them to `order_data_dict`.

    BUGFIX: layers with a missing (None) value for a data key used to be
    sorted as-is, raising "TypeError: '<' not supported between instances of
    'NoneType' and 'str'" in Python 3. None values are now replaced with a
    type-appropriate sortable null before sorting.

    Returns:
        The updated `order_data_dict`.
    """
    if order_data_dict:
        self.data_dict_sanity_check(order_data_dict, treat_data_dict_as="layer_orders")
    self.data_dict_sanity_check(
        additional_data_dict,
        data_keys_list=additional_data_keys,
        treat_data_dict_as="layers",
    )
    # FIXME: here we need to check whether the two dictionaries are in fact 'compatible' with respect to sample names
    # they describe.
    # sortable stand-ins for missing values, keyed by predicted value type
    nulls_per_type = {"str": "", "int": 0, "float": 0}
    for data_key in additional_data_keys:
        if "!" in data_key:
            # we don't order stacked bar charts
            continue
        # predict the type so None can be replaced with a comparable null
        type_class = utils.get_predicted_type_of_items_in_a_dict(
            additional_data_dict, data_key
        )
        predicted_key_type = type_class.__name__ if type_class else "unknown"
        null_value = nulls_per_type.get(predicted_key_type, "")
        layer_name_layer_data_tuples = [
            (
                additional_data_dict[layer][data_key]
                if additional_data_dict[layer][data_key]
                else null_value,
                layer,
            )
            for layer in additional_data_dict
        ]
        order_data_dict[">> " + data_key] = {
            "newick": None,
            "basic": ",".join([t[1] for t in sorted(layer_name_layer_data_tuples)]),
        }
        order_data_dict[">> " + data_key + " (reverse)"] = {
            "newick": None,
            "basic": ",".join(
                [t[1] for t in sorted(layer_name_layer_data_tuples, reverse=True)]
            ),
        }
    return order_data_dict
|
https://github.com/merenlab/anvio/issues/707
|
Traceback (most recent call last):
File "/Users/alonshaiber/github/anvio/bin/anvi-display-pan", line 74, in <module>
d = interactive.Interactive(args)
File "/Users/alonshaiber/github/anvio/anvio/interactive.py", line 106, in __init__
self.layers_order_data_dict = TableForLayerOrders(self.args).get(self.layers_additional_data_keys, self.layers_additional_data_dict) if a_db_is_found else {}
File "/Users/alonshaiber/github/anvio/anvio/tables/miscdata.py", line 256, in get
return self.update_orders_dict_using_additional_data_dict(d, additional_data_keys, additional_data_dict)
File "/Users/alonshaiber/github/anvio/anvio/tables/miscdata.py", line 276, in update_orders_dict_using_additional_data_dict
order_data_dict['>> ' + data_key] = {'newick': None, 'basic': ','.join([t[1] for t in sorted(layer_name_layer_data_tuples)])}
TypeError: '<' not supported between instances of 'NoneType' and 'str
|
TypeError
|
def get(self):
    """Return (data_keys, data_dict) for all additional data in the table.

    Builds one dict per item; numeric values are cast explicitly, and items
    missing a key get None. Missing/empty numeric values fall back to the
    type-appropriate null from `self.nulls_per_type`.

    Returns:
        tuple: (list of data keys, dict of item_name -> {key: value}).
    """
    self.progress.new("Recovering additional keys and data for %s" % self.target)
    self.progress.update("...")
    database = db.DB(self.db_path, utils.get_required_version_for_db(self.db_path))
    additional_data = database.get_table_as_dict(self.table_name)
    additional_data_keys = database.get_single_column_from_table(
        self.table_name, "data_key", unique=True
    )
    additional_data_item_names = database.get_single_column_from_table(
        self.table_name, "item_name", unique=True
    )
    database.disconnect()
    if not len(additional_data_item_names):
        self.progress.end()
        return [], {}
    d = {}
    for additional_data_item_name in additional_data_item_names:
        d[additional_data_item_name] = {}
    # explicit casts instead of eval() on database content (safer, same result)
    casts = {"int": int, "float": float}
    for entry in additional_data.values():
        additional_data_item_name = entry["item_name"]
        key = entry["data_key"]
        value = entry["data_value"]
        if entry["data_type"] in casts:
            # empty/None values fall back to the type-appropriate null
            d[additional_data_item_name][key] = casts[entry["data_type"]](
                value or self.nulls_per_type[entry["data_type"]]
            )
        else:
            d[additional_data_item_name][key] = value
    # items that lack a given key explicitly carry None for it
    for additional_data_item_name in d:
        for key in additional_data_keys:
            if key not in d[additional_data_item_name]:
                d[additional_data_item_name][key] = None
    self.progress.end()
    return additional_data_keys, d
|
def get(self):
    """Return (data_keys, data_dict) for all additional data in the table.

    BUGFIX: numeric `data_value` entries can be None or empty in the database;
    casting them directly (`int(None)`, `float("")`) crashed. Such values now
    fall back to 0. Also replaces eval() on database content with explicit
    casts.

    Returns:
        tuple: (list of data keys, dict of item_name -> {key: value}).
    """
    self.progress.new("Recovering additional keys and data for %s" % self.target)
    self.progress.update("...")
    database = db.DB(self.db_path, utils.get_required_version_for_db(self.db_path))
    additional_data = database.get_table_as_dict(self.table_name)
    additional_data_keys = database.get_single_column_from_table(
        self.table_name, "data_key", unique=True
    )
    additional_data_item_names = database.get_single_column_from_table(
        self.table_name, "item_name", unique=True
    )
    database.disconnect()
    if not len(additional_data_item_names):
        self.progress.end()
        return [], {}
    d = {}
    for additional_data_item_name in additional_data_item_names:
        d[additional_data_item_name] = {}
    # explicit casts instead of eval() on database content (safer, same result)
    casts = {"int": int, "float": float}
    for entry in additional_data.values():
        additional_data_item_name = entry["item_name"]
        key = entry["data_key"]
        value = entry["data_value"]
        if entry["data_type"] in casts:
            # None/empty numeric values fall back to 0 instead of crashing
            d[additional_data_item_name][key] = casts[entry["data_type"]](value or 0)
        else:
            d[additional_data_item_name][key] = value
    # items that lack a given key explicitly carry None for it
    for additional_data_item_name in d:
        for key in additional_data_keys:
            if key not in d[additional_data_item_name]:
                d[additional_data_item_name][key] = None
    self.progress.end()
    return additional_data_keys, d
|
https://github.com/merenlab/anvio/issues/707
|
Traceback (most recent call last):
File "/Users/alonshaiber/github/anvio/bin/anvi-display-pan", line 74, in <module>
d = interactive.Interactive(args)
File "/Users/alonshaiber/github/anvio/anvio/interactive.py", line 106, in __init__
self.layers_order_data_dict = TableForLayerOrders(self.args).get(self.layers_additional_data_keys, self.layers_additional_data_dict) if a_db_is_found else {}
File "/Users/alonshaiber/github/anvio/anvio/tables/miscdata.py", line 256, in get
return self.update_orders_dict_using_additional_data_dict(d, additional_data_keys, additional_data_dict)
File "/Users/alonshaiber/github/anvio/anvio/tables/miscdata.py", line 276, in update_orders_dict_using_additional_data_dict
order_data_dict['>> ' + data_key] = {'newick': None, 'basic': ','.join([t[1] for t in sorted(layer_name_layer_data_tuples)])}
TypeError: '<' not supported between instances of 'NoneType' and 'str
|
TypeError
|
def generate_pan_db(self):
    """Create the pan database on disk and seed it with this run's metadata."""
    # fall back to a placeholder when no description was provided
    description = self.description or "_No description is provided_"

    meta_values = {}
    meta_values["internal_genome_names"] = ",".join(self.internal_genome_names)
    meta_values["external_genome_names"] = ",".join(self.external_genome_names)
    meta_values["num_genomes"] = len(self.genomes)
    meta_values["min_percent_identity"] = self.min_percent_identity
    meta_values["pc_min_occurrence"] = self.PC_min_occurrence
    meta_values["mcl_inflation"] = self.mcl_inflation
    meta_values["default_view"] = "PC_presence_absence"
    meta_values["use_ncbi_blast"] = self.use_ncbi_blast
    meta_values["diamond_sensitive"] = self.sensitive
    meta_values["minbit"] = self.minbit
    meta_values["exclude_partial_gene_calls"] = self.exclude_partial_gene_calls
    meta_values["gene_alignments_computed"] = not self.skip_alignments
    meta_values["genomes_storage_hash"] = self.genomes_storage_hash
    meta_values["project_name"] = self.project_name
    # no orderings exist yet; set once clustering/ordering has run
    meta_values["PCs_ordered"] = False
    meta_values["description"] = description

    dbops.PanDatabase(self.pan_db_path, quiet=False).create(meta_values)
|
def generate_pan_db(self):
    """Create the pan database on disk and seed it with this run's metadata.

    BUGFIX: the self-table key must be 'PCs_ordered' -- it was written as
    'PCs_clustered', which made PanSuperclass.__init__ crash later with
    KeyError: 'PCs_ordered' when reading the database back.
    """
    meta_values = {
        "internal_genome_names": ",".join(self.internal_genome_names),
        "external_genome_names": ",".join(self.external_genome_names),
        "num_genomes": len(self.genomes),
        "min_percent_identity": self.min_percent_identity,
        "pc_min_occurrence": self.PC_min_occurrence,
        "mcl_inflation": self.mcl_inflation,
        "default_view": "PC_presence_absence",
        "use_ncbi_blast": self.use_ncbi_blast,
        "diamond_sensitive": self.sensitive,
        "minbit": self.minbit,
        "exclude_partial_gene_calls": self.exclude_partial_gene_calls,
        "gene_alignments_computed": False if self.skip_alignments else True,
        "genomes_storage_hash": self.genomes_storage_hash,
        "project_name": self.project_name,
        "PCs_ordered": False,
        "description": self.description
        if self.description
        else "_No description is provided_",
    }
    dbops.PanDatabase(self.pan_db_path, quiet=False).create(meta_values)
|
https://github.com/merenlab/anvio/issues/648
|
Traceback (most recent call last):
File "/home/vnsriniv/virtual-envs/anvio/bin/anvi-display-pan", line 4, in <module>
__import__('pkg_resources').run_script('anvio===3-master', 'anvi-display-pan')
File "/home/vnsriniv/virtual-envs/anvio/lib/python3.5/site-packages/pkg_resources/__init__.py", line 748, in run_script
self.require(requires)[0].run_script(script_name, ns)
File "/home/vnsriniv/virtual-envs/anvio/lib/python3.5/site-packages/pkg_resources/__init__.py", line 1517, in run_script
exec(code, namespace, namespace)
File "/home/vnsriniv/virtual-envs/anvio/lib/python3.5/site-packages/anvio-3_master-py3.5-linux-x86_64.egg/EGG-INFO/scripts/anvi-display-pan", line 75, in <module>
d = interactive.Interactive(args)
File "/home/vnsriniv/virtual-envs/anvio/lib/python3.5/site-packages/anvio-3_master-py3.5-linux-x86_64.egg/anvio/interactive.py", line 155, in __init__
self.load_pan_mode()
File "/home/vnsriniv/virtual-envs/anvio/lib/python3.5/site-packages/anvio-3_master-py3.5-linux-x86_64.egg/anvio/interactive.py", line 744, in load_pan_mode
PanSuperclass.__init__(self, self.args)
File "/home/vnsriniv/virtual-envs/anvio/lib/python3.5/site-packages/anvio-3_master-py3.5-linux-x86_64.egg/anvio/dbops.py", line 784, in __init__
if self.p_meta['PCs_ordered']:
KeyError: 'PCs_ordered'
|
KeyError
|
def __init__(self, args, r=run, p=progress):
    """Initialize the pan database superclass from `args.pan_db`.

    Loads pan database metadata into `self.p_meta`, recovers item orders when
    PCs were ordered, collects all protein cluster names, sets up the states
    table, and optionally attaches a genomes storage when
    `args.genomes_storage` is provided. Returns early (with a warning) if
    `args` carries no `pan_db` attribute.
    """
    self.args = args
    self.run = r
    self.progress = p
    # state below is populated here or lazily by init_* methods elsewhere
    self.genome_names = []
    self.protein_clusters = {}
    self.protein_clusters_initialized = False
    self.protein_cluster_names = set([])
    self.protein_clusters_gene_alignments = {}
    self.protein_clusters_gene_alignments_available = False
    self.protein_clusters_function_sources = []
    self.protein_clusters_functions_dict = {}
    self.item_orders = {}
    self.views = {}
    self.collection_profile = {}
    self.num_protein_clusters = None
    self.num_genes_in_protein_clusters = None
    self.genomes_storage_is_available = False
    self.genomes_storage_has_functions = False
    self.functions_initialized = False
    try:
        self.pan_db_path = self.args.pan_db
    except:
        self.run.warning(
            "PanSuperclass class called with args without pan_db_path member! Returning prematurely."
        )
        return
    filesnpaths.is_file_exists(self.pan_db_path)
    self.progress.new("Initializing the pan database superclass")
    self.progress.update("Creating an instance of the pan database")
    pan_db = PanDatabase(self.pan_db_path, run=self.run, progress=self.progress)
    self.progress.update("Setting profile self data dict")
    self.p_meta = pan_db.meta
    self.p_meta["creation_date"] = (
        utils.get_time_to_date(self.p_meta["creation_date"])
        if "creation_date" in self.p_meta
        else "unknown"
    )
    # merge internal and external genome names into one sorted list
    self.p_meta["genome_names"] = sorted(
        [
            s.strip()
            for s in self.p_meta["external_genome_names"].split(",")
            + self.p_meta["internal_genome_names"].split(",")
            if s
        ]
    )
    self.p_meta["num_genomes"] = len(self.p_meta["genome_names"])
    self.genome_names = self.p_meta["genome_names"]
    self.protein_clusters_gene_alignments_available = self.p_meta[
        "gene_alignments_computed"
    ]
    # FIXME: Is this the future where the pan db version is > 6? Great. Then the if statement here no longer
    # needs to check whether 'PCs_ordered' is a valid key in self.p_meta:
    if "PCs_ordered" in self.p_meta and self.p_meta["PCs_ordered"]:
        self.p_meta["available_item_orders"] = sorted(
            [s.strip() for s in self.p_meta["available_item_orders"].split(",")]
        )
        self.item_orders = pan_db.db.get_table_as_dict(t.item_orders_table_name)
        # we need to convert data for 'basic' item orders to array in order to avoid compatibility issues with
        # other additional item orders in pan and full mode (otherwise interactive class gets complicated
        # unnecessarily).
        for item_order in self.item_orders:
            if self.item_orders[item_order]["type"] == "basic":
                try:
                    self.item_orders[item_order]["data"] = self.item_orders[item_order][
                        "data"
                    ].split(",")
                except:
                    raise ConfigError(
                        "Something is wrong with the basic order `%s` in this pan database :("
                        % (item_order)
                    )
    else:
        # no orderings available: leave order-related members empty
        self.p_meta["available_item_orders"] = None
        self.p_meta["default_item_order"] = None
        self.item_orders = None
    # recover all protein cluster names so others can access to this information
    # without having to initialize anything
    self.protein_cluster_names = set(
        pan_db.db.get_single_column_from_table(
            t.pan_protein_clusters_table_name, "protein_cluster_id"
        )
    )
    pan_db.disconnect()
    # create an instance of states table
    self.states_table = TablesForStates(self.pan_db_path)
    self.progress.end()
    if "genomes_storage" in args.__dict__ and args.genomes_storage:
        self.genomes_storage = auxiliarydataops.GenomesDataStorage(
            args.genomes_storage,
            self.p_meta["genomes_storage_hash"],
            genome_names_to_focus=self.p_meta["genome_names"],
            run=self.run,
            progress=self.progress,
        )
        self.genomes_storage_is_available = True
        self.genomes_storage_has_functions = (
            self.genomes_storage.functions_are_available
        )
    self.run.info(
        "Pan DB", "Initialized: %s (v. %s)" % (self.pan_db_path, anvio.__pan__version__)
    )
|
def __init__(self, args, r=run, p=progress):
    """Initialize the pan database superclass from `args.pan_db`.

    BUGFIX: older pan databases (v <= 6) do not carry 'PCs_ordered' in their
    self table, so `self.p_meta["PCs_ordered"]` raised KeyError. The lookup is
    now guarded with a membership check.
    """
    self.args = args
    self.run = r
    self.progress = p
    # state below is populated here or lazily by init_* methods elsewhere
    self.genome_names = []
    self.protein_clusters = {}
    self.protein_clusters_initialized = False
    self.protein_cluster_names = set([])
    self.protein_clusters_gene_alignments = {}
    self.protein_clusters_gene_alignments_available = False
    self.protein_clusters_function_sources = []
    self.protein_clusters_functions_dict = {}
    self.item_orders = {}
    self.views = {}
    self.collection_profile = {}
    self.num_protein_clusters = None
    self.num_genes_in_protein_clusters = None
    self.genomes_storage_is_available = False
    self.genomes_storage_has_functions = False
    self.functions_initialized = False
    try:
        self.pan_db_path = self.args.pan_db
    except:
        self.run.warning(
            "PanSuperclass class called with args without pan_db_path member! Returning prematurely."
        )
        return
    filesnpaths.is_file_exists(self.pan_db_path)
    self.progress.new("Initializing the pan database superclass")
    self.progress.update("Creating an instance of the pan database")
    pan_db = PanDatabase(self.pan_db_path, run=self.run, progress=self.progress)
    self.progress.update("Setting profile self data dict")
    self.p_meta = pan_db.meta
    self.p_meta["creation_date"] = (
        utils.get_time_to_date(self.p_meta["creation_date"])
        if "creation_date" in self.p_meta
        else "unknown"
    )
    # merge internal and external genome names into one sorted list
    self.p_meta["genome_names"] = sorted(
        [
            s.strip()
            for s in self.p_meta["external_genome_names"].split(",")
            + self.p_meta["internal_genome_names"].split(",")
            if s
        ]
    )
    self.p_meta["num_genomes"] = len(self.p_meta["genome_names"])
    self.genome_names = self.p_meta["genome_names"]
    self.protein_clusters_gene_alignments_available = self.p_meta[
        "gene_alignments_computed"
    ]
    # FIXME: once the minimum supported pan db version is > 6, the membership
    # check below is no longer needed; older databases lack 'PCs_ordered'.
    if "PCs_ordered" in self.p_meta and self.p_meta["PCs_ordered"]:
        self.p_meta["available_item_orders"] = sorted(
            [s.strip() for s in self.p_meta["available_item_orders"].split(",")]
        )
        self.item_orders = pan_db.db.get_table_as_dict(t.item_orders_table_name)
        # we need to convert data for 'basic' item orders to array in order to avoid compatibility issues with
        # other additional item orders in pan and full mode (otherwise interactive class gets complicated
        # unnecessarily).
        for item_order in self.item_orders:
            if self.item_orders[item_order]["type"] == "basic":
                try:
                    self.item_orders[item_order]["data"] = self.item_orders[item_order][
                        "data"
                    ].split(",")
                except:
                    raise ConfigError(
                        "Something is wrong with the basic order `%s` in this pan database :("
                        % (item_order)
                    )
    else:
        # no orderings available: leave order-related members empty
        self.p_meta["available_item_orders"] = None
        self.p_meta["default_item_order"] = None
        self.item_orders = None
    # recover all protein cluster names so others can access to this information
    # without having to initialize anything
    self.protein_cluster_names = set(
        pan_db.db.get_single_column_from_table(
            t.pan_protein_clusters_table_name, "protein_cluster_id"
        )
    )
    pan_db.disconnect()
    # create an instance of states table
    self.states_table = TablesForStates(self.pan_db_path)
    self.progress.end()
    if "genomes_storage" in args.__dict__ and args.genomes_storage:
        self.genomes_storage = auxiliarydataops.GenomesDataStorage(
            args.genomes_storage,
            self.p_meta["genomes_storage_hash"],
            genome_names_to_focus=self.p_meta["genome_names"],
            run=self.run,
            progress=self.progress,
        )
        self.genomes_storage_is_available = True
        self.genomes_storage_has_functions = (
            self.genomes_storage.functions_are_available
        )
    self.run.info(
        "Pan DB", "Initialized: %s (v. %s)" % (self.pan_db_path, anvio.__pan__version__)
    )
|
https://github.com/merenlab/anvio/issues/648
|
Traceback (most recent call last):
File "/home/vnsriniv/virtual-envs/anvio/bin/anvi-display-pan", line 4, in <module>
__import__('pkg_resources').run_script('anvio===3-master', 'anvi-display-pan')
File "/home/vnsriniv/virtual-envs/anvio/lib/python3.5/site-packages/pkg_resources/__init__.py", line 748, in run_script
self.require(requires)[0].run_script(script_name, ns)
File "/home/vnsriniv/virtual-envs/anvio/lib/python3.5/site-packages/pkg_resources/__init__.py", line 1517, in run_script
exec(code, namespace, namespace)
File "/home/vnsriniv/virtual-envs/anvio/lib/python3.5/site-packages/anvio-3_master-py3.5-linux-x86_64.egg/EGG-INFO/scripts/anvi-display-pan", line 75, in <module>
d = interactive.Interactive(args)
File "/home/vnsriniv/virtual-envs/anvio/lib/python3.5/site-packages/anvio-3_master-py3.5-linux-x86_64.egg/anvio/interactive.py", line 155, in __init__
self.load_pan_mode()
File "/home/vnsriniv/virtual-envs/anvio/lib/python3.5/site-packages/anvio-3_master-py3.5-linux-x86_64.egg/anvio/interactive.py", line 744, in load_pan_mode
PanSuperclass.__init__(self, self.args)
File "/home/vnsriniv/virtual-envs/anvio/lib/python3.5/site-packages/anvio-3_master-py3.5-linux-x86_64.egg/anvio/dbops.py", line 784, in __init__
if self.p_meta['PCs_ordered']:
KeyError: 'PCs_ordered'
|
KeyError
|
def share_project(request, userdb, response):
    """Create a shared view for the requesting user's project; return it as JSON."""
    set_default_headers(response)
    requesting_user = get_user(request, userdb, response)
    view = userdb.create_view(
        requesting_user,
        request.forms.get("name"),
        request.forms.get("project"),
        request.forms.get("public"),
    )
    return json.dumps(view)
|
def share_project(request, userdb, response):
    """Create a shared view for the requesting user's project; return it as JSON.

    BUGFIX: json.dumps() takes the object to serialize positionally; passing
    it as the keyword `share=` raised TypeError because unknown keywords are
    forwarded to JSONEncoder.__init__.
    """
    set_default_headers(response)
    return json.dumps(
        userdb.create_view(
            get_user(request, userdb, response),
            request.forms.get("name"),
            request.forms.get("project"),
            request.forms.get("public"),
        )
    )
|
https://github.com/merenlab/anvio/issues/253
|
Traceback (most recent call last):
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/bottle.py", line 862, in _handle
return route.call(**args)
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/bottle.py", line 1732, in wrapper
rv = callback(*a, **ka)
File "/Users/meren/Desktop/MBL/anvio/bin/anvi-server", line 169, in share_project
return multiroutes.share_project(request, userdb, response)
File "/Users/meren/Desktop/MBL/anvio/anvio/bottleroutes_multi.py", line 124, in share_project
return json.dumps(share = userdb.create_view(get_user(request, userdb, response), request.forms.get('name'), request.forms.get('project'), request.forms.get('public')))
File "/Users/meren/Desktop/MBL/anvio/anvio/usermgmt.py", line 670, in create_view
p = (pname, login)
NameError: global name 'login' is not defined
|
NameError
|
def get_serial(self):
    """
    Get serial number for device
    :return: String of the serial number
    :rtype: str
    """
    # TODO raise exception if serial cant be got and handle during device add
    if self._serial is None:
        serial_path = os.path.join(self._device_path, "device_serial")
        serial = ""
        attempts = 0
        # retry up to 5 times; the sysfs node can be transiently unreadable
        while not serial and attempts < 5:
            try:
                with open(serial_path, "r") as serial_file:
                    serial = serial_file.read().strip()
            except (PermissionError, OSError) as err:
                self.logger.warning("getting serial: {0}".format(err))
                serial = ""
            except UnicodeDecodeError as err:
                self.logger.warning("malformed serial: {0}".format(err))
                serial = ""
            attempts += 1
            time.sleep(0.1)
        if not serial:
            self.logger.debug("getting serial: {0} count:{1}".format(serial, attempts))
        # known placeholder strings mean the hardware has no real serial
        if serial in ("", "Default string", "empty (NULL)", "As printed in the D cover"):
            serial = "UNKWN{0:012}".format(random.randint(0, 4096))
        self._serial = serial.replace(" ", "_")
    return self._serial
|
def get_serial(self):
    """
    Get serial number for device
    :return: String of the serial number
    :rtype: str
    """
    # TODO raise exception if serial cant be got and handle during device add
    if self._serial is None:
        serial_path = os.path.join(self._device_path, "device_serial")
        count = 0
        serial = ""
        while len(serial) == 0:
            if count >= 5:
                break
            try:
                with open(serial_path, "r") as f:
                    serial = f.read().strip()
            except (PermissionError, OSError) as err:
                self.logger.warning("getting serial: {0}".format(err))
                serial = ""
            except UnicodeDecodeError as err:
                # Some devices expose non-UTF-8 garbage in device_serial;
                # without this handler the decode error propagated out of
                # __init__ and killed the whole daemon. Treat it like an
                # unreadable serial and retry / fall back instead.
                self.logger.warning("malformed serial: {0}".format(err))
                serial = ""
            count += 1
            time.sleep(0.1)
        if len(serial) == 0:
            self.logger.debug("getting serial: {0} count:{1}".format(serial, count))
        # Placeholder strings reported by some firmwares mean "no serial";
        # synthesise a random one so every device still gets a unique id.
        if (
            serial == ""
            or serial == "Default string"
            or serial == "empty (NULL)"
            or serial == "As printed in the D cover"
        ):
            serial = "UNKWN{0:012}".format(random.randint(0, 4096))
        self._serial = serial.replace(" ", "_")
    return self._serial
|
https://github.com/openrazer/openrazer/issues/558
|
Starting daemon.
2018-03-25 18:28:24 | razer | INFO | Initialising Daemon (v2.2.2). Pid: 6168
2018-03-25 18:28:24 | razer.screensaver | INFO | Initialising DBus Screensaver Monitor
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerAbyssus ---------------------------- (1532:0042)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerAbyssus1800 ------------------------ (1532:0020)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerAbyssusV2 -------------------------- (1532:005B)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerAnansi ----------------------------- (1532:010F)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerBlackWidowChroma ------------------- (1532:0203)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerBlackWidowChromaOverwatch ---------- (1532:0211)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerBlackWidowChromaTournamentEdition -- (1532:0209)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerBlackWidowChromaV2 ----------------- (1532:0221)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerBlackWidowClassic ------------------ (1532:011B)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerBlackWidowClassicAlternate --------- (1532:010E)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerBlackWidowUltimate2012 ------------- (1532:010D)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerBlackWidowUltimate2013 ------------- (1532:011A)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerBlackWidowUltimate2016 ------------- (1532:0214)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerBlackWidowXChroma ------------------ (1532:0216)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerBlackWidowXTournamentEditionChroma - (1532:021A)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerBlackWidowXUltimate ---------------- (1532:0217)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerBladeLate2016 ---------------------- (1532:0224)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerBladePro2017 ----------------------- (1532:0225)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerBladePro2017FullHD ----------------- (1532:022F)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerBladeProLate2016 ------------------- (1532:0210)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerBladeQHD --------------------------- (1532:020F)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerBladeStealth ----------------------- (1532:0205)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerBladeStealthLate2016 --------------- (1532:0220)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerBladeStealthLate2017 --------------- (1532:0232)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerBladeStealthMid2017 ---------------- (1532:022D)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerChromaMugHolder -------------------- (1532:0F07)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerCore ------------------------------- (1532:0215)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerDeathAdderChroma ------------------- (1532:0043)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerDeathAdderElite -------------------- (1532:005C)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerDeathStalkerChroma ----------------- (1532:0204)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerDeathStalkerExpert ----------------- (1532:0202)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerDiamondbackChroma ------------------ (1532:004C)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerFirefly ---------------------------- (1532:0C00)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerImperator -------------------------- (1532:002F)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerKraken ----------------------------- (1532:0504)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerKrakenClassic ---------------------- (1532:0501)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerKrakenV2 --------------------------- (1532:0510)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerMamba2012Wired --------------------- (1532:0024)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerMamba2012Wireless ------------------ (1532:0025)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerMambaChromaWired ------------------- (1532:0044)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerMambaChromaWireless ---------------- (1532:0045)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerMambaTE ---------------------------- (1532:0046)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerNaga2014 --------------------------- (1532:0040)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerNagaChroma ------------------------- (1532:0053)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerNagaHex ---------------------------- (1532:0041)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerNagaHexRed ------------------------- (1532:0036)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerNagaHexV2 -------------------------- (1532:0050)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerNostromo --------------------------- (1532:0111)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerOrbweaver -------------------------- (1532:0113)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerOrbweaverChroma -------------------- (1532:0207)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerOrnata ----------------------------- (1532:021F)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerOrnataChroma ----------------------- (1532:021E)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerOrochi2011 ------------------------- (1532:0013)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerOrochi2013 ------------------------- (1532:0039)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerOrochiWired ------------------------ (1532:0048)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerOuroboros -------------------------- (1532:0032)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerTaipan ----------------------------- (1532:0034)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerTartarus --------------------------- (1532:0201)
2018-03-25 18:28:24 | razer | DEBUG | Loaded device specification: RazerTartarusChroma --------------------- (1532:0208)
2018-03-25 18:28:24 | razer | INFO | Found device.0: 0003:1532:0204.0003
2018-03-25 18:28:24 | razer.device0 | INFO | Initialising device.0 RazerDeathStalkerChroma
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/daemonize.py", line 245, in start
self.action(*privileged_action_result)
File "/usr/bin/openrazer-daemon", line 126, in run_daemon
test_dir=args.test_dir)
File "/usr/lib/python3/dist-packages/openrazer_daemon/daemon.py", line 107, in __init__
self._load_devices(first_run=True)
File "/usr/lib/python3/dist-packages/openrazer_daemon/daemon.py", line 398, in _load_devices
razer_device = device_class(sys_path, device_number, self._config, testing=self._test_dir is not None, additional_interfaces=sorted(additional_interfaces))
File "/usr/lib/python3/dist-packages/openrazer_daemon/hardware/keyboards.py", line 50, in __init__
super(_RippleKeyboard, self).__init__(*args, **kwargs)
File "/usr/lib/python3/dist-packages/openrazer_daemon/hardware/keyboards.py", line 19, in __init__
super(_MacroKeyboard, self).__init__(*args, **kwargs)
File "/usr/lib/python3/dist-packages/openrazer_daemon/hardware/device_base.py", line 477, in __init__
super().__init__(device_path, device_number, config, testing, additional_interfaces, additional_methods)
File "/usr/lib/python3/dist-packages/openrazer_daemon/hardware/device_base.py", line 440, in __init__
super().__init__(device_path, device_number, config, testing, additional_interfaces, additional_methods)
File "/usr/lib/python3/dist-packages/openrazer_daemon/hardware/device_base.py", line 64, in __init__
self.serial = self.get_serial()
File "/usr/lib/python3/dist-packages/openrazer_daemon/hardware/device_base.py", line 207, in get_serial
serial = open(serial_path, 'r').read().strip()
File "/usr/lib/python3.6/codecs.py", line 321, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xb6 in position 2: invalid start byte
Exception ignored in: <object repr() failed>
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/openrazer_daemon/hardware/device_base.py", line 426, in __del__
self.close()
File "/usr/lib/python3/dist-packages/openrazer_daemon/hardware/device_base.py", line 339, in close
if not self._is_closed:
AttributeError: 'RazerDeathStalkerChroma' object has no attribute '_is_closed'
Stopping daemon.
|
UnicodeDecodeError
|
def run():
    """Entry point: parse CLI options, honour --stop/--respawn, then
    hand control to Daemonize to launch the daemon."""
    global args

    logger = None
    args = parse_args()

    if args.stop:
        stop_daemon(args)
        sys.exit(0)

    if args.respawn:
        stop_daemon(args)
        time.sleep(3)

    # daemonize logs exceptions to its logger (which defaults to the syslog)
    # and does not make them appear on stdout/stderr. If we're in foreground
    # mode, override that logger with our own.
    if not args.foreground:
        logger = logging.getLogger("run-daemon")
        if args.verbose:
            logger.setLevel(logging.DEBUG)

    install_example_config_file()
    os.makedirs(args.run_dir, exist_ok=True)

    pid_path = os.path.join(args.run_dir, "openrazer-daemon.pid")
    daemonizer = Daemonize(app="openrazer-daemon",
                           pid=pid_path,
                           action=run_daemon,
                           foreground=args.foreground,
                           verbose=args.verbose,
                           chdir=args.run_dir,
                           logger=logger)
    daemonizer.start()
|
def run():
    """Entry point: parse CLI options, honour --stop/--respawn, then
    hand control to Daemonize to launch the daemon."""
    global args

    logger = None
    args = parse_args()

    if args.stop:
        stop_daemon(args)
        sys.exit(0)

    if args.respawn:
        stop_daemon(args)
        time.sleep(3)

    # Daemonize logs exceptions to its own logger, which defaults to the
    # syslog, so startup crashes never reached stdout/stderr and the daemon
    # appeared to die silently. Hand it an explicit logger so tracebacks
    # become visible (raised to DEBUG when --verbose is given).
    if not args.foreground:
        import logging

        logger = logging.getLogger("run-daemon")
        if args.verbose:
            logger.setLevel(logging.DEBUG)

    install_example_config_file()
    os.makedirs(args.run_dir, exist_ok=True)

    daemon = Daemonize(app="openrazer-daemon",
                       pid=os.path.join(args.run_dir, "openrazer-daemon.pid"),
                       action=run_daemon,
                       foreground=args.foreground,
                       verbose=args.verbose,
                       chdir=args.run_dir,
                       logger=logger)
    daemon.start()
|
https://github.com/openrazer/openrazer/issues/431
|
thunderrd@Q6600:~$ openrazer-daemon -Fv
2017-11-01 21:44:38 | razer | INFO | Initialising Daemon (v2.0.0). Pid: 13092
2017-11-01 21:44:38 | razer.screensaver | INFO | Initialising DBus Screensaver Monitor
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerAbyssus ---------------------------- (1532:0042)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerAbyssusV2 -------------------------- (1532:005B)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerAnansi ----------------------------- (1532:010F)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerBlackWidowChroma ------------------- (1532:0203)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerBlackWidowChromaOverwatch ---------- (1532:0211)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerBlackWidowChromaTournamentEdition -- (1532:0209)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerBlackWidowChromaV2 ----------------- (1532:0221)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification:
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerBlackWidowClassic ------------------ (1532:011B)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerBlackWidowClassicAlternate --------- (1532:010E)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerBlackWidowUltimate2012 ------------- (1532:010D)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerBlackWidowUltimate2013 ------------- (1532:011A)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerBlackWidowUltimate2016 ------------- (1532:0214)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerBlackWidowXChroma ------------------ (1532:0216)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerBlackWidowXTournamentEditionChroma - (1532:021A)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerBlackWidowXUltimate ---------------- (1532:0217)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerBladeLate2016 ---------------------- (1532:0224)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerBladePro2017 ----------------------- (1532:0225)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerBladeProLate2016 ------------------- (1532:0210)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerBladeQHD --------------------------- (1532:020F)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerBladeStealth ----------------------- (1532:0205)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerBladeStealthLate2016 --------------- (1532:0220)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerBladeStealthLate2017 --------------- (1532:0232)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerBladeStealthMid2017 ---------------- (1532:022D)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerChromaMugHolder -------------------- (1532:0F07)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerCore ------------------------------- (1532:0215)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerDeathAdderChroma ------------------- (1532:0043)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerDeathAdderElite -------------------- (1532:005C)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerDeathStalkerChroma ----------------- (1532:0204)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerDeathStalkerExpert ----------------- (1532:0202)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerDiamondbackChroma ------------------ (1532:004C)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerFirefly ---------------------------- (1532:0C00)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerImperator -------------------------- (1532:002F)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerKraken ----------------------------- (1532:0504)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerKrakenClassic ---------------------- (1532:0501)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerKrakenV2 --------------------------- (1532:0510)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerMamba2012Wired --------------------- (1532:0024)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerMamba2012Wireless ------------------ (1532:0025)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerMambaChromaWired ------------------- (1532:0044)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerMambaChromaWireless ---------------- (1532:0045)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerMambaTE ---------------------------- (1532:0046)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerNaga2014 --------------------------- (1532:0040)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerNagaChroma ------------------------- (1532:0053)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerNagaHex ---------------------------- (1532:0041)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerNagaHexRed ------------------------- (1532:0036)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerNagaHexV2 -------------------------- (1532:0050)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerNostromo --------------------------- (1532:0111)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerOrbweaver -------------------------- (1532:0113)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerOrbweaverChroma -------------------- (1532:0207)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerOrnata ----------------------------- (1532:021F)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerOrnataChroma ----------------------- (1532:021E)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerOrochi2011 ------------------------- (1532:0013)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerOrochi2013 ------------------------- (1532:0039)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerOrochiWired ------------------------ (1532:0048)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerOuroboros -------------------------- (1532:0032)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerTaipan ----------------------------- (1532:0034)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerTartarus --------------------------- (1532:0201)
2017-11-01 21:44:38 | razer | DEBUG | Loaded device specification: RazerTartarusChroma --------------------- (1532:0208)
2017-11-01 21:44:38 | razer | INFO | Found device.0: 0003:1532:0214.0003
Exception ignored in: <object repr() failed>
Traceback (most recent call last):
File "/usr/lib64/python3.6/site-packages/openrazer_daemon/hardware/device_base.py", line 421, in __del__
self.close()
File "/usr/lib64/python3.6/site-packages/openrazer_daemon/hardware/device_base.py", line 334, in close
if not self._is_closed:
AttributeError: 'RazerBlackWidowUltimate2016' object has no attribute '_is_closed'
thunderrd@Q6600:~$
|
AttributeError
|
async def get(self, key):
    """Get data from Redis for a given key.
    Args:
        key (string): The key to lookup in the database.
    Returns:
        object or None: The data object stored for that key, or None if no
        object found for that key.
    """
    if not self.client:
        return None

    _LOGGER.debug(_("Getting %s from Redis."), key)
    raw = await self.client.execute("GET", key)
    if not raw:
        return None

    return json.loads(raw, object_hook=JSONDecoder())
|
async def get(self, key):
    """Get data from Redis for a given key.
    Args:
        key (string): The key to lookup in the database.
    Returns:
        object or None: The data object stored for that key, or None if no
        object found for that key.
    """
    if self.client:
        _LOGGER.debug(_("Getting %s from Redis."), key)
        data = await self.client.execute("GET", key)
        if data:
            # ``json.loads`` takes no meaningful ``encoding`` argument (it was
            # ignored and removed in Python 3.9); the custom decoder must be
            # supplied as an ``object_hook`` instance so stored objects are
            # actually reconstructed instead of coming back as plain dicts.
            return json.loads(data, object_hook=JSONDecoder())
    return None
|
https://github.com/opsdroid/opsdroid/issues/1673
|
notrexroof_1 | DEBUG opsdroid.memory: Putting seen to memory.
notrexroof_1 | DEBUG opsdroid.database.redis: Putting seen into Redis.
notrexroof_1 | ERROR opsdroid.core: Exception when running skill 'seen'.
notrexroof_1 | Traceback (most recent call last):
notrexroof_1 | File "/usr/local/lib/python3.8/site-packages/opsdroid/core.py", line 427, in run_skill
notrexroof_1 | return await skill(self, config, event)
notrexroof_1 | File "/root/.local/share/opsdroid/opsdroid-modules/skill/seen/__init__.py", line 16, in last_seen
notrexroof_1 | await message.respond("I last saw {} {}".format(name, human(seen[name], precision=1)))
notrexroof_1 | File "/root/.local/share/opsdroid/site-packages/ago.py", line 55, in human
notrexroof_1 | delta = get_delta_from_subject(subject)
notrexroof_1 | File "/root/.local/share/opsdroid/site-packages/ago.py", line 16, in get_delta_from_subject
notrexroof_1 | subject = float(subject)
notrexroof_1 | TypeError: float() argument must be a string or a number, not 'dict'
|
TypeError
|
async def train_parsers(self, skills):
    """Train the parsers.
    Args:
        skills (list): A list of all the loaded skills.
    """
    if "parsers" not in self.modules:
        return

    rasanlu = get_parser_config("rasanlu", self.modules.get("parsers", {}))
    if rasanlu and rasanlu["enabled"]:
        await train_rasanlu(rasanlu, skills)
|
async def train_parsers(self, skills):
    """Train the parsers.
    Args:
        skills (list): A list of all the loaded skills.
    """
    if "parsers" in self.config:
        parsers = self.config["parsers"] or {}
        rasanlu = parsers.get("rasanlu")
        # Use .get() so a parser entry configured without an explicit
        # "enabled" key is treated as disabled instead of raising KeyError.
        if rasanlu and rasanlu.get("enabled"):
            await train_rasanlu(rasanlu, skills)
|
https://github.com/opsdroid/opsdroid/issues/1554
|
INFO opsdroid.logging: ========================================
INFO opsdroid.logging: Started opsdroid v0.8.1+833.g5c8dbb7.dirty.
INFO opsdroid: ========================================
INFO opsdroid: You can customise your opsdroid by modifying your configuration.yaml.
INFO opsdroid: Read more at: http://opsdroid.readthedocs.io/#configuration
INFO opsdroid: Watch the Get Started Videos at: http://bit.ly/2fnC0Fh
INFO opsdroid: Install Opsdroid Desktop at:
https://github.com/opsdroid/opsdroid-desktop/releases
INFO opsdroid: ========================================
DEBUG asyncio: Using selector: KqueueSelector
DEBUG opsdroid.loader: Loaded loader.
DEBUG opsdroid.loader: Loading modules from config...
WARNING opsdroid.loader: No databases in configuration. This will cause skills which store things in memory to lose data when opsdroid is restarted.
DEBUG opsdroid.loader: Loading parsers modules...
DEBUG opsdroid.loader: Loaded parsers: opsdroid.parsers.dialogflow.
DEBUG opsdroid.loader: Loading skill modules...
DEBUG opsdroid.loader: Updating dance...
DEBUG opsdroid.loader: b'Already up to date.'
DEBUG opsdroid.loader: b'Current branch master is up to date.'
DEBUG opsdroid.loader: Couldn't find the file requirements.txt, skipping.
DEBUG opsdroid.loader: Loaded skill: opsdroid-modules.skill.dance.
DEBUG opsdroid.loader: Updating hello...
DEBUG opsdroid.loader: b'Already up to date.'
DEBUG opsdroid.loader: b'Current branch master is up to date.'
DEBUG opsdroid.loader: Couldn't find the file requirements.txt, skipping.
DEBUG opsdroid.loader: Loaded skill: opsdroid-modules.skill.hello.
DEBUG opsdroid.loader: Updating loudnoises...
DEBUG opsdroid.loader: b'Already up to date.'
DEBUG opsdroid.loader: b'Current branch master is up to date.'
DEBUG opsdroid.loader: Couldn't find the file requirements.txt, skipping.
DEBUG opsdroid.loader: Loaded skill: opsdroid-modules.skill.loudnoises.
DEBUG opsdroid.loader: Updating seen...
DEBUG opsdroid.loader: b'Already up to date.'
DEBUG opsdroid.loader: b'Current branch master is up to date.'
DEBUG opsdroid.loader: b'Processing /Users/fabiorosado/Library/Caches/pip/wheels/c8/61/2f/47076152dc9487142c2ae48754c87539ff0993decf0fc2f198/ago-0.0.93-py3-none-any.whl'
DEBUG opsdroid.loader: b'Installing collected packages: ago'
DEBUG opsdroid.loader: b'Successfully installed ago-0.0.93'
DEBUG opsdroid.loader: b'WARNING: Target directory /Users/fabiorosado/Library/Application Support/opsdroid/site-packages/ago.py already exists. Specify --upgrade to force replacement.'
DEBUG opsdroid.loader: b'WARNING: Target directory /Users/fabiorosado/Library/Application Support/opsdroid/site-packages/__pycache__ already exists. Specify --upgrade to force replacement.'
DEBUG opsdroid.loader: b'WARNING: Target directory /Users/fabiorosado/Library/Application Support/opsdroid/site-packages/ago-0.0.93.dist-info already exists. Specify --upgrade to force replacement.'
DEBUG opsdroid.loader: Loaded skill: opsdroid-modules.skill.seen.
DEBUG opsdroid.loader: Loading connector modules...
DEBUG opsdroid.loader: Loaded connector: opsdroid.connector.websocket.
DEBUG opsdroid.loader: Loaded connector: opsdroid.connector.shell.
DEBUG opsdroid.core: Loaded 4 skills.
DEBUG opsdroid.connector.websocket: Starting Websocket connector.
DEBUG opsdroid.connector.shell: Loaded shell Connector.
DEBUG opsdroid.connector.shell: Connecting to shell.
INFO opsdroid.web: Started web server on http://0.0.0.0:8080
INFO opsdroid.core: Opsdroid is now running, press ctrl+c to exit.
opsdroid> DEBUG opsdroid.memory: Getting seen from memory.
DEBUG opsdroid.memory: Putting seen to memory.
hi
DEBUG opsdroid.core: Parsing input: <opsdroid.events.Message(text=hi)>.
DEBUG opsdroid.core: Processing parsers...
DEBUG opsdroid.memory: Getting seen from memory.
DEBUG opsdroid.memory: Putting seen to memory.
DEBUG asyncio: Using selector: KqueueSelector
Traceback (most recent call last):
File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/Users/fabiorosado/Documents/GitHub.tmp/opsdroid/opsdroid/__main__.py", line 12, in <module>
init()
File "/Users/fabiorosado/Documents/GitHub.tmp/opsdroid/opsdroid/__main__.py", line 9, in init
opsdroid.cli.cli()
File "/Users/fabiorosado/.local/share/virtualenvs/opsdroid-AmeH9qot/lib/python3.7/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/Users/fabiorosado/.local/share/virtualenvs/opsdroid-AmeH9qot/lib/python3.7/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/Users/fabiorosado/.local/share/virtualenvs/opsdroid-AmeH9qot/lib/python3.7/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/fabiorosado/.local/share/virtualenvs/opsdroid-AmeH9qot/lib/python3.7/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/fabiorosado/.local/share/virtualenvs/opsdroid-AmeH9qot/lib/python3.7/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/Users/fabiorosado/Documents/GitHub.tmp/opsdroid/opsdroid/cli/start.py", line 42, in start
opsdroid.run()
File "/Users/fabiorosado/Documents/GitHub.tmp/opsdroid/opsdroid/core.py", line 165, in run
self.eventloop.run_until_complete(asyncio.gather(*pending))
File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/asyncio/base_events.py", line 583, in run_until_complete
return future.result()
File "/Users/fabiorosado/Documents/GitHub.tmp/opsdroid/opsdroid/connector/shell/__init__.py", line 93, in _parse_message
await self.parseloop()
File "/Users/fabiorosado/Documents/GitHub.tmp/opsdroid/opsdroid/connector/shell/__init__.py", line 88, in parseloop
await self.opsdroid.parse(message)
File "/Users/fabiorosado/Documents/GitHub.tmp/opsdroid/opsdroid/core.py", line 498, in parse
ranked_skills = await self.get_ranked_skills(unconstrained_skills, event)
File "/Users/fabiorosado/Documents/GitHub.tmp/opsdroid/opsdroid/core.py", line 429, in get_ranked_skills
if dialogflow and dialogflow["enabled"]:
KeyError: 'enabled'
ERROR: Unhandled exception in opsdroid, exiting...
ERROR: Unhandled exception in opsdroid, exiting...
|
KeyError
|
async def get_ranked_skills(self, skills, message):
    """Take a message and return a ranked list of matching skills.
    Args:
        skills (list): List of all available skills.
        message (string): Context message to base the ranking of skills on.
    Returns:
        ranked_skills (list): List of all available skills sorted and ranked based on the score they muster when matched against the message parsed.
    """
    ranked_skills = []
    if isinstance(message, events.Message):
        ranked_skills += await parse_regex(self, skills, message)
        ranked_skills += await parse_format(self, skills, message)

    if "parsers" in self.modules:
        _LOGGER.debug(_("Processing parsers..."))
        parsers = self.modules.get("parsers", {})
        # (config name, debug message, parser coroutine) in the original
        # evaluation order; each enabled parser contributes candidate skills.
        nlu_parsers = (
            ("dialogflow", "Checking dialogflow...", parse_dialogflow),
            ("luisai", "Checking luisai...", parse_luisai),
            ("sapcai", "Checking SAPCAI...", parse_sapcai),
            ("witai", "Checking wit.ai...", parse_witai),
            ("watson", "Checking IBM Watson...", parse_watson),
            ("rasanlu", "Checking Rasa NLU...", parse_rasanlu),
        )
        for name, debug_msg, parse in nlu_parsers:
            parser_config = get_parser_config(name, parsers)
            if parser_config and parser_config["enabled"]:
                _LOGGER.debug(_(debug_msg))
                ranked_skills += await parse(self, skills, message, parser_config)

    return sorted(ranked_skills, key=lambda entry: entry["score"], reverse=True)
|
async def get_ranked_skills(self, skills, message):
    """Take a message and return a ranked list of matching skills.

    Args:
        skills (list): List of all available skills.
        message (string): Context message to base the ranking of skills on.

    Returns:
        ranked_skills (list): List of all available skills sorted and ranked
            based on the score they muster when matched against the message
            parsed.

    """
    ranked_skills = []
    if isinstance(message, events.Message):
        ranked_skills += await parse_regex(self, skills, message)
        ranked_skills += await parse_format(self, skills, message)
    if "parsers" in self.config:
        _LOGGER.debug(_("Processing parsers..."))
        parsers = self.config["parsers"] or {}
        # NOTE(review): a configured parser entry may omit the "enabled" key
        # entirely (new-style configs), which made ["enabled"] raise KeyError.
        # Use .get("enabled") so such parsers are skipped instead of crashing;
        # confirm whether a missing key should instead mean enabled-by-default.
        dialogflow = parsers.get("dialogflow")
        if dialogflow and dialogflow.get("enabled"):
            _LOGGER.debug(_("Checking dialogflow..."))
            ranked_skills += await parse_dialogflow(self, skills, message, dialogflow)
        luisai = parsers.get("luisai")
        if luisai and luisai.get("enabled"):
            _LOGGER.debug(_("Checking luisai..."))
            ranked_skills += await parse_luisai(self, skills, message, luisai)
        sapcai = parsers.get("sapcai")
        if sapcai and sapcai.get("enabled"):
            _LOGGER.debug(_("Checking SAPCAI..."))
            ranked_skills += await parse_sapcai(self, skills, message, sapcai)
        witai = parsers.get("witai")
        if witai and witai.get("enabled"):
            _LOGGER.debug(_("Checking wit.ai..."))
            ranked_skills += await parse_witai(self, skills, message, witai)
        watson = parsers.get("watson")
        if watson and watson.get("enabled"):
            _LOGGER.debug(_("Checking IBM Watson..."))
            ranked_skills += await parse_watson(self, skills, message, watson)
        rasanlu = parsers.get("rasanlu")
        if rasanlu and rasanlu.get("enabled"):
            _LOGGER.debug(_("Checking Rasa NLU..."))
            ranked_skills += await parse_rasanlu(self, skills, message, rasanlu)
    # Highest scoring skills first.
    return sorted(ranked_skills, key=lambda k: k["score"], reverse=True)
|
https://github.com/opsdroid/opsdroid/issues/1554
|
INFO opsdroid.logging: ========================================
INFO opsdroid.logging: Started opsdroid v0.8.1+833.g5c8dbb7.dirty.
INFO opsdroid: ========================================
INFO opsdroid: You can customise your opsdroid by modifying your configuration.yaml.
INFO opsdroid: Read more at: http://opsdroid.readthedocs.io/#configuration
INFO opsdroid: Watch the Get Started Videos at: http://bit.ly/2fnC0Fh
INFO opsdroid: Install Opsdroid Desktop at:
https://github.com/opsdroid/opsdroid-desktop/releases
INFO opsdroid: ========================================
DEBUG asyncio: Using selector: KqueueSelector
DEBUG opsdroid.loader: Loaded loader.
DEBUG opsdroid.loader: Loading modules from config...
WARNING opsdroid.loader: No databases in configuration. This will cause skills which store things in memory to lose data when opsdroid is restarted.
DEBUG opsdroid.loader: Loading parsers modules...
DEBUG opsdroid.loader: Loaded parsers: opsdroid.parsers.dialogflow.
DEBUG opsdroid.loader: Loading skill modules...
DEBUG opsdroid.loader: Updating dance...
DEBUG opsdroid.loader: b'Already up to date.'
DEBUG opsdroid.loader: b'Current branch master is up to date.'
DEBUG opsdroid.loader: Couldn't find the file requirements.txt, skipping.
DEBUG opsdroid.loader: Loaded skill: opsdroid-modules.skill.dance.
DEBUG opsdroid.loader: Updating hello...
DEBUG opsdroid.loader: b'Already up to date.'
DEBUG opsdroid.loader: b'Current branch master is up to date.'
DEBUG opsdroid.loader: Couldn't find the file requirements.txt, skipping.
DEBUG opsdroid.loader: Loaded skill: opsdroid-modules.skill.hello.
DEBUG opsdroid.loader: Updating loudnoises...
DEBUG opsdroid.loader: b'Already up to date.'
DEBUG opsdroid.loader: b'Current branch master is up to date.'
DEBUG opsdroid.loader: Couldn't find the file requirements.txt, skipping.
DEBUG opsdroid.loader: Loaded skill: opsdroid-modules.skill.loudnoises.
DEBUG opsdroid.loader: Updating seen...
DEBUG opsdroid.loader: b'Already up to date.'
DEBUG opsdroid.loader: b'Current branch master is up to date.'
DEBUG opsdroid.loader: b'Processing /Users/fabiorosado/Library/Caches/pip/wheels/c8/61/2f/47076152dc9487142c2ae48754c87539ff0993decf0fc2f198/ago-0.0.93-py3-none-any.whl'
DEBUG opsdroid.loader: b'Installing collected packages: ago'
DEBUG opsdroid.loader: b'Successfully installed ago-0.0.93'
DEBUG opsdroid.loader: b'WARNING: Target directory /Users/fabiorosado/Library/Application Support/opsdroid/site-packages/ago.py already exists. Specify --upgrade to force replacement.'
DEBUG opsdroid.loader: b'WARNING: Target directory /Users/fabiorosado/Library/Application Support/opsdroid/site-packages/__pycache__ already exists. Specify --upgrade to force replacement.'
DEBUG opsdroid.loader: b'WARNING: Target directory /Users/fabiorosado/Library/Application Support/opsdroid/site-packages/ago-0.0.93.dist-info already exists. Specify --upgrade to force replacement.'
DEBUG opsdroid.loader: Loaded skill: opsdroid-modules.skill.seen.
DEBUG opsdroid.loader: Loading connector modules...
DEBUG opsdroid.loader: Loaded connector: opsdroid.connector.websocket.
DEBUG opsdroid.loader: Loaded connector: opsdroid.connector.shell.
DEBUG opsdroid.core: Loaded 4 skills.
DEBUG opsdroid.connector.websocket: Starting Websocket connector.
DEBUG opsdroid.connector.shell: Loaded shell Connector.
DEBUG opsdroid.connector.shell: Connecting to shell.
INFO opsdroid.web: Started web server on http://0.0.0.0:8080
INFO opsdroid.core: Opsdroid is now running, press ctrl+c to exit.
opsdroid> DEBUG opsdroid.memory: Getting seen from memory.
DEBUG opsdroid.memory: Putting seen to memory.
hi
DEBUG opsdroid.core: Parsing input: <opsdroid.events.Message(text=hi)>.
DEBUG opsdroid.core: Processing parsers...
DEBUG opsdroid.memory: Getting seen from memory.
DEBUG opsdroid.memory: Putting seen to memory.
DEBUG asyncio: Using selector: KqueueSelector
Traceback (most recent call last):
File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/Users/fabiorosado/Documents/GitHub.tmp/opsdroid/opsdroid/__main__.py", line 12, in <module>
init()
File "/Users/fabiorosado/Documents/GitHub.tmp/opsdroid/opsdroid/__main__.py", line 9, in init
opsdroid.cli.cli()
File "/Users/fabiorosado/.local/share/virtualenvs/opsdroid-AmeH9qot/lib/python3.7/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/Users/fabiorosado/.local/share/virtualenvs/opsdroid-AmeH9qot/lib/python3.7/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/Users/fabiorosado/.local/share/virtualenvs/opsdroid-AmeH9qot/lib/python3.7/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/fabiorosado/.local/share/virtualenvs/opsdroid-AmeH9qot/lib/python3.7/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/fabiorosado/.local/share/virtualenvs/opsdroid-AmeH9qot/lib/python3.7/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/Users/fabiorosado/Documents/GitHub.tmp/opsdroid/opsdroid/cli/start.py", line 42, in start
opsdroid.run()
File "/Users/fabiorosado/Documents/GitHub.tmp/opsdroid/opsdroid/core.py", line 165, in run
self.eventloop.run_until_complete(asyncio.gather(*pending))
File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/asyncio/base_events.py", line 583, in run_until_complete
return future.result()
File "/Users/fabiorosado/Documents/GitHub.tmp/opsdroid/opsdroid/connector/shell/__init__.py", line 93, in _parse_message
await self.parseloop()
File "/Users/fabiorosado/Documents/GitHub.tmp/opsdroid/opsdroid/connector/shell/__init__.py", line 88, in parseloop
await self.opsdroid.parse(message)
File "/Users/fabiorosado/Documents/GitHub.tmp/opsdroid/opsdroid/core.py", line 498, in parse
ranked_skills = await self.get_ranked_skills(unconstrained_skills, event)
File "/Users/fabiorosado/Documents/GitHub.tmp/opsdroid/opsdroid/core.py", line 429, in get_ranked_skills
if dialogflow and dialogflow["enabled"]:
KeyError: 'enabled'
ERROR: Unhandled exception in opsdroid, exiting...
ERROR: Unhandled exception in opsdroid, exiting...
|
KeyError
|
def load_config_file(config_paths):
    """Load a yaml config file from path.

    We get a path for the configuration file and then use the yaml
    library to load this file - the configuration will be shown as a
    dict. Here we also add constructors to our yaml loader and handle
    different exceptions that could be raised when trying to load or
    validate the file.

    Args:
        config_paths: List of paths to configuration.yaml files

    Returns:
        dict: Dict containing config fields

    """
    config_path = get_config_path(config_paths)

    # Allow "$VARNAME" values in the YAML to be resolved from the environment.
    yaml.SafeLoader.add_implicit_resolver("!envvar", env_var_pattern, first="$")
    yaml.SafeLoader.add_constructor("!envvar", envvar_constructor)

    try:
        with open(config_path, "r") as stream:
            _LOGGER.info(_("Loaded config from %s."), config_path)
            data = yaml.load(stream, Loader=yaml.SafeLoader)
            validate_data_type(data)
            configuration = update_pre_0_17_config_format(data)
            # Keep the validator's result: it is the (possibly coerced)
            # configuration that callers should use.
            configuration = validate_configuration(configuration, BASE_SCHEMA)
            return configuration
    # All three failure modes are fatal and handled identically, so use a
    # single handler instead of three duplicated ones.
    except (yaml.YAMLError, FileNotFoundError, TypeError) as error:
        _LOGGER.critical(error)
        sys.exit(1)
|
def load_config_file(config_paths):
    """Load a yaml config file from path.

    We get a path for the configuration file and then use the yaml
    library to load this file - the configuration will be shown as a
    dict. Here we also add constructors to our yaml loader and handle
    different exceptions that could be raised when trying to load or
    validate the file.

    Args:
        config_paths: List of paths to configuration.yaml files

    Returns:
        dict: Dict containing config fields

    """
    config_path = get_config_path(config_paths)

    # Allow "$VARNAME" values in the YAML to be resolved from the environment.
    yaml.SafeLoader.add_implicit_resolver("!envvar", env_var_pattern, first="$")
    yaml.SafeLoader.add_constructor("!envvar", envvar_constructor)

    try:
        with open(config_path, "r") as stream:
            _LOGGER.info(_("Loaded config from %s."), config_path)
            data = yaml.load(stream, Loader=yaml.SafeLoader)
            validate_data_type(data)
            configuration = update_pre_0_17_config_format(data)
            # Prefer the validator's result: voluptuous may coerce or fill in
            # values. Fall back to the pre-validation dict when the validator
            # validates in place and returns None.
            validated = validate_configuration(configuration, BASE_SCHEMA)
            return validated if validated is not None else configuration
    # All three failure modes are fatal and handled identically, so use a
    # single handler instead of three duplicated ones.
    except (yaml.YAMLError, FileNotFoundError, TypeError) as error:
        _LOGGER.critical(error)
        sys.exit(1)
|
https://github.com/opsdroid/opsdroid/issues/1347
|
hemmo_1 | DEBUG opsdroid.parsers.crontab: Running crontab skills at Thu Jan 30 08:29:00 2020.
hemmo_1 | ERROR opsdroid.core: Exception when running skill 'cve'.
hemmo_1 | Traceback (most recent call last):
hemmo_1 | File "/usr/local/lib/python3.7/site-packages/opsdroid/core.py", line 397, in run_skill
hemmo_1 | return await skill(event)
hemmo_1 | File "/root/.local/share/opsdroid/opsdroid-modules/skill/cve/__init__.py", line 142, in schedule
hemmo_1 | await message.respond(f"Searching CVEs with queries: {', '.join(queries)}")
hemmo_1 | File "/usr/local/lib/python3.7/site-packages/opsdroid/events.py", line 263, in respond
hemmo_1 | "thinking-delay" in self.connector.configuration
hemmo_1 | AttributeError: 'NoneType' object has no attribute 'configuration'
|
AttributeError
|
def validate_configuration(config, schema):
    """Validate data from configuration.yaml.

    We use voluptuous to validate the data obtained from the
    configuration file with the schema declared above. Voluptuous
    will raise a 'voluptuous.MultipleInvalid' exception if the data
    passed doesn't match the schema.

    This is a helper function so we don't need to do much with it other than
    initialize voluptuous.Schema and attempt to validate the data. The function
    'load_config_file' located on 'opsdroid.configuration.__init__' will handle
    the case when the exception is raised.

    Args:
        config: a yaml stream obtained from opening configuration.yaml
        schema (dict): the rules used to validate against data

    Returns:
        The validated (possibly coerced) configuration.

    """
    validator = Schema(schema, extra=ALLOW_EXTRA)
    try:
        return validator(config)
    except MultipleInvalid as error:
        # Validation failures are fatal: log which module failed and why,
        # then bail out.
        module_name = config.get("name", "basic opsdroid rules")
        _LOGGER.critical(
            _("Configuration for %s failed validation! %s - '%s'."),
            module_name,
            error.msg.capitalize(),
            error.path[0],
        )
        sys.exit(1)
|
def validate_configuration(config, schema):
    """Validate data from configuration.yaml.

    We use voluptuous to validate the data obtained from the
    configuration file with the schema declared above. Voluptuous
    will raise a 'voluptuous.MultipleInvalid' exception if the data
    passed doesn't match the schema.

    This is a helper function so we don't need to do much with it other than
    initialize voluptuous.Schema and attempt to validate the data. The function
    'load_config_file' located on 'opsdroid.configuration.__init__' will handle
    the case when the exception is raised.

    Args:
        config: a yaml stream obtained from opening configuration.yaml
        schema (dict): the rules used to validate against data

    Returns:
        The validated (possibly coerced) configuration.

    """
    validate = Schema(schema, extra=ALLOW_EXTRA)
    try:
        # Return the validated result: voluptuous may coerce values, and
        # discarding it made this function always return None on success.
        return validate(config)
    except MultipleInvalid as error:
        _LOGGER.critical(
            _("Configuration for %s failed validation! %s - '%s'."),
            config.get("name", "basic opsdroid rules"),
            error.msg.capitalize(),
            error.path[0],
        )
        sys.exit(1)
|
https://github.com/opsdroid/opsdroid/issues/1347
|
hemmo_1 | DEBUG opsdroid.parsers.crontab: Running crontab skills at Thu Jan 30 08:29:00 2020.
hemmo_1 | ERROR opsdroid.core: Exception when running skill 'cve'.
hemmo_1 | Traceback (most recent call last):
hemmo_1 | File "/usr/local/lib/python3.7/site-packages/opsdroid/core.py", line 397, in run_skill
hemmo_1 | return await skill(event)
hemmo_1 | File "/root/.local/share/opsdroid/opsdroid-modules/skill/cve/__init__.py", line 142, in schedule
hemmo_1 | await message.respond(f"Searching CVEs with queries: {', '.join(queries)}")
hemmo_1 | File "/usr/local/lib/python3.7/site-packages/opsdroid/events.py", line 263, in respond
hemmo_1 | "thinking-delay" in self.connector.configuration
hemmo_1 | AttributeError: 'NoneType' object has no attribute 'configuration'
|
AttributeError
|
def _load_modules(self, modules_type, modules):
    """Install and load modules.

    Args:
        self: instance method
        modules_type (str): Type of module being loaded
        modules (dict): Dictionary containing all modules

    Returns:
        list: modules and their config information

    """
    _LOGGER.debug(_("Loading %s modules..."), modules_type)
    loaded_modules = list()

    # Ensure the shared dependency directory exists and is importable.
    if not os.path.isdir(DEFAULT_MODULE_DEPS_PATH):
        os.makedirs(DEFAULT_MODULE_DEPS_PATH)
    sys.path.append(DEFAULT_MODULE_DEPS_PATH)

    # entry point group naming scheme: opsdroid_ + module type plural,
    # eg. "opsdroid_databases"
    epname = "opsdroid_{}s".format(modules_type)
    entry_points = {ep.name: ep for ep in iter_entry_points(group=epname)}
    # Log every pip-installed package that advertises support for this
    # module type via setuptools entry points.
    for epname in entry_points:
        _LOGGER.debug(
            _("Found installed package for %s '%s' support."), modules_type, epname
        )
    for module in modules:
        # Build this module's effective config (name, paths, flags).
        config = self.setup_module_config(modules, module, modules_type, entry_points)
        # If the module isn't builtin, or isn't already on the
        # python path, install it
        if not (config["is_builtin"] or config["module"] or config["entrypoint"]):
            # Remove module for reinstall if no-cache set
            self.check_cache(config)
            # Install or update module
            if not self._is_module_installed(config):
                self._install_module(config)
            else:
                self._update_module(config)
        # Import module
        # NOTE: the loop variable is rebound here — from the config key to
        # the imported module object (or None on import failure).
        self.current_import_config = config
        module = self.import_module(config)
        # Suppress exception if module doesn't contain CONFIG_SCHEMA
        # (accessing module.CONFIG_SCHEMA raises AttributeError then).
        # Keep the validator's (possibly coerced) result as the config.
        with contextlib.suppress(AttributeError):
            config = validate_configuration(config, module.CONFIG_SCHEMA)
        # Load intents
        intents = self._load_intents(config)
        if module is not None:
            loaded_modules.append(
                {"module": module, "config": config, "intents": intents}
            )
        else:
            _LOGGER.error(_("Module %s failed to import."), config["name"])
    return loaded_modules
|
def _load_modules(self, modules_type, modules):
    """Install and load modules.

    Args:
        self: instance method
        modules_type (str): Type of module being loaded
        modules (dict): Dictionary containing all modules

    Returns:
        list: modules and their config information

    """
    _LOGGER.debug(_("Loading %s modules..."), modules_type)
    loaded_modules = list()

    # Ensure the shared dependency directory exists and is importable.
    if not os.path.isdir(DEFAULT_MODULE_DEPS_PATH):
        os.makedirs(DEFAULT_MODULE_DEPS_PATH)
    sys.path.append(DEFAULT_MODULE_DEPS_PATH)

    # entry point group naming scheme: opsdroid_ + module type plural,
    # eg. "opsdroid_databases"
    epname = "opsdroid_{}s".format(modules_type)
    entry_points = {ep.name: ep for ep in iter_entry_points(group=epname)}
    for epname in entry_points:
        _LOGGER.debug(
            _("Found installed package for %s '%s' support."), modules_type, epname
        )
    for module in modules:
        # Build this module's effective config (name, paths, flags).
        config = self.setup_module_config(modules, module, modules_type, entry_points)
        # If the module isn't builtin, or isn't already on the
        # python path, install it
        if not (config["is_builtin"] or config["module"] or config["entrypoint"]):
            # Remove module for reinstall if no-cache set
            self.check_cache(config)
            # Install or update module
            if not self._is_module_installed(config):
                self._install_module(config)
            else:
                self._update_module(config)
        # Import module (rebinds the loop variable to the module object,
        # or None on import failure).
        self.current_import_config = config
        module = self.import_module(config)
        # Suppress exception if module doesn't contain CONFIG_SCHEMA
        with contextlib.suppress(AttributeError):
            # Keep the validator's (possibly coerced) result when it returns
            # one; previously it was silently discarded, so schema defaults
            # and coercions never reached the loaded module's config.
            validated_config = validate_configuration(config, module.CONFIG_SCHEMA)
            if validated_config is not None:
                config = validated_config
        # Load intents
        intents = self._load_intents(config)
        if module is not None:
            loaded_modules.append(
                {"module": module, "config": config, "intents": intents}
            )
        else:
            _LOGGER.error(_("Module %s failed to import."), config["name"])
    return loaded_modules
|
https://github.com/opsdroid/opsdroid/issues/1347
|
hemmo_1 | DEBUG opsdroid.parsers.crontab: Running crontab skills at Thu Jan 30 08:29:00 2020.
hemmo_1 | ERROR opsdroid.core: Exception when running skill 'cve'.
hemmo_1 | Traceback (most recent call last):
hemmo_1 | File "/usr/local/lib/python3.7/site-packages/opsdroid/core.py", line 397, in run_skill
hemmo_1 | return await skill(event)
hemmo_1 | File "/root/.local/share/opsdroid/opsdroid-modules/skill/cve/__init__.py", line 142, in schedule
hemmo_1 | await message.respond(f"Searching CVEs with queries: {', '.join(queries)}")
hemmo_1 | File "/usr/local/lib/python3.7/site-packages/opsdroid/events.py", line 263, in respond
hemmo_1 | "thinking-delay" in self.connector.configuration
hemmo_1 | AttributeError: 'NoneType' object has no attribute 'configuration'
|
AttributeError
|
async def websocket_handler(self, request):
    """Upgrade a previously requested socket to a websocket connection.

    Validates that the socket id was requested beforehand and has not
    timed out, then relays incoming text frames to opsdroid as Messages.
    """
    socket_id = request.match_info.get("socket")
    matching = [
        conn for conn in self.available_connections if conn["id"] == socket_id
    ]

    # The socket id must have been handed out by a prior request.
    if len(matching) != 1:
        return aiohttp.web.Response(
            text=json.dumps("Please request a socket first"),
            headers=HEADERS,
            status=400,
        )

    connection = matching[0]
    elapsed = (datetime.now() - connection["date"]).total_seconds()
    if elapsed > self.connection_timeout:
        self.available_connections.remove(connection)
        return aiohttp.web.Response(
            text=json.dumps("Socket request timed out"), headers=HEADERS, status=408
        )

    # The reservation is single-use: consume it before upgrading.
    self.available_connections.remove(connection)
    _LOGGER.debug(_("User connected to %s."), socket_id)

    websocket = aiohttp.web.WebSocketResponse()
    await websocket.prepare(request)
    self.active_connections[socket_id] = websocket

    async for msg in websocket:
        if msg.type == aiohttp.WSMsgType.TEXT:
            message = Message(text=msg.data, user=None, target=None, connector=self)
            await self.opsdroid.parse(message)
        elif msg.type == aiohttp.WSMsgType.ERROR:
            _LOGGER.error(
                _("Websocket connection closed with exception %s."),
                websocket.exception(),
            )

    _LOGGER.info(_("websocket connection closed"))
    self.active_connections.pop(socket_id, None)
    return websocket
|
async def websocket_handler(self, request):
    """Handle for aiohttp handling websocket connections.

    Validates that the socket id was requested beforehand and has not
    timed out, then relays incoming text frames to opsdroid as Messages.
    """
    socket = request.match_info.get("socket")
    available = [item for item in self.available_connections if item["id"] == socket]
    if len(available) != 1:
        return aiohttp.web.Response(
            text=json.dumps("Please request a socket first"),
            headers=HEADERS,
            status=400,
        )
    if (
        datetime.now() - available[0]["date"]
    ).total_seconds() > self.connection_timeout:
        self.available_connections.remove(available[0])
        return aiohttp.web.Response(
            text=json.dumps("Socket request timed out"), headers=HEADERS, status=408
        )
    self.available_connections.remove(available[0])
    _LOGGER.debug(_("User connected to %s."), socket)
    websocket = aiohttp.web.WebSocketResponse()
    await websocket.prepare(request)
    self.active_connections[socket] = websocket
    async for msg in websocket:
        if msg.type == aiohttp.WSMsgType.TEXT:
            # Pass arguments by keyword: the positional call
            # Message(msg.data, None, None, self) no longer lined up with
            # the Message signature, leaving message.connector unset and
            # causing "'NoneType' object has no attribute 'configuration'"
            # when skills responded.
            message = Message(text=msg.data, user=None, target=None, connector=self)
            await self.opsdroid.parse(message)
        elif msg.type == aiohttp.WSMsgType.ERROR:
            _LOGGER.error(
                _("Websocket connection closed with exception %s."),
                websocket.exception(),
            )
    _LOGGER.info(_("websocket connection closed"))
    self.active_connections.pop(socket, None)
    return websocket
|
https://github.com/opsdroid/opsdroid/issues/1310
|
C:\Users\integrat>opsdroid start
INFO opsdroid.logging: ========================================
INFO opsdroid.logging: Started opsdroid v0.17.0.
INFO opsdroid: ========================================
INFO opsdroid: You can customise your opsdroid by modifying your configuration.yaml.
INFO opsdroid: Read more at: http://opsdroid.readthedocs.io/#configuration
INFO opsdroid: Watch the Get Started Videos at: http://bit.ly/2fnC0Fh
INFO opsdroid: Install Opsdroid Desktop at:
https://github.com/opsdroid/opsdroid-desktop/releases
INFO opsdroid: ========================================
WARNING opsdroid.loader: No databases in configuration. This will cause skills which store things in memory to lose data when opsdroid is restarted.
INFO opsdroid.web: Started web server on http://0.0.0.0:8080
INFO opsdroid.core: Opsdroid is now running, press ctrl+c to exit.
INFO aiohttp.access: 10.52.81.154 [21/Dec/2019:09:39:20 +0000] "POST /connector/websocket HTTP/1.1" 200 252 "-" "-"
ERROR opsdroid.core: Exception when running skill 'hello'.
Traceback (most recent call last):
File "c:\users\integrat\appdata\local\programs\python\python38-32\lib\site-packages\opsdroid\core.py", line 396, in run_skill
return await skill(event)
File "C:\Users\integrat\AppData\Local\opsdroid-modules\opsdroid\opsdroid-modules\skill\hello\__init__.py", line 13, in hello
await message.respond(text)
File "c:\users\integrat\appdata\local\programs\python\python38-32\lib\site-packages\opsdroid\events.py", line 263, in respond
"thinking-delay" in self.connector.configuration
AttributeError: 'NoneType' object has no attribute 'configuration'
ERROR aiohttp.server: Error handling request
Traceback (most recent call last):
File "c:\users\integrat\appdata\local\programs\python\python38-32\lib\site-packages\opsdroid\core.py", line 396, in run_skill
return await skill(event)
File "C:\Users\integrat\AppData\Local\opsdroid-modules\opsdroid\opsdroid-modules\skill\hello\__init__.py", line 13, in hello
await message.respond(text)
File "c:\users\integrat\appdata\local\programs\python\python38-32\lib\site-packages\opsdroid\events.py", line 263, in respond
"thinking-delay" in self.connector.configuration
AttributeError: 'NoneType' object has no attribute 'configuration'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "c:\users\integrat\appdata\local\programs\python\python38-32\lib\site-packages\aiohttp\web_protocol.py", line 418, in start
resp = await task
File "c:\users\integrat\appdata\local\programs\python\python38-32\lib\site-packages\aiohttp\web_app.py", line 458, in _handle
resp = await handler(request)
File "c:\users\integrat\appdata\local\programs\python\python38-32\lib\site-packages\opsdroid\connector\websocket\__init__.py", line 101, in websocket_handler
await self.opsdroid.parse(message)
File "c:\users\integrat\appdata\local\programs\python\python38-32\lib\site-packages\opsdroid\core.py", line 509, in parse
await asyncio.gather(*tasks)
File "c:\users\integrat\appdata\local\programs\python\python38-32\lib\site-packages\opsdroid\core.py", line 402, in run_skill
await event.respond(
File "c:\users\integrat\appdata\local\programs\python\python38-32\lib\site-packages\opsdroid\events.py", line 263, in respond
"thinking-delay" in self.connector.configuration
AttributeError: 'NoneType' object has no attribute 'configuration'
INFO aiohttp.access: 10.52.81.154 [21/Dec/2019:09:39:24 +0000] "POST /connector/websocket HTTP/1.1" 200 252 "-" "-"
`
|
AttributeError
|
async def parseloop(self):
    """Parseloop moved out for testing.

    Draws the shell prompt, waits for one line of user input, wraps it
    in a Message originating from this connector, and hands it to
    opsdroid for parsing.
    """
    self.draw_prompt()
    user_input = await self.async_input()
    # Keyword arguments keep the Message construction robust against
    # changes in the Message parameter order.
    message = Message(text=user_input, user=self.user, target=None, connector=self)
    await self.opsdroid.parse(message)
|
async def parseloop(self):
    """Parseloop moved out for testing.

    Draws the shell prompt, waits for one line of user input, wraps it
    in a Message originating from this connector, and hands it to
    opsdroid for parsing.
    """
    self.draw_prompt()
    user_input = await self.async_input()
    # Pass arguments by keyword: the positional call
    # Message(user_input, self.user, None, self) no longer lined up with
    # the Message signature, leaving message.connector unset and causing
    # "'NoneType' object has no attribute 'configuration'" on respond().
    message = Message(text=user_input, user=self.user, target=None, connector=self)
    await self.opsdroid.parse(message)
|
https://github.com/opsdroid/opsdroid/issues/1310
|
C:\Users\integrat>opsdroid start
INFO opsdroid.logging: ========================================
INFO opsdroid.logging: Started opsdroid v0.17.0.
INFO opsdroid: ========================================
INFO opsdroid: You can customise your opsdroid by modifying your configuration.yaml.
INFO opsdroid: Read more at: http://opsdroid.readthedocs.io/#configuration
INFO opsdroid: Watch the Get Started Videos at: http://bit.ly/2fnC0Fh
INFO opsdroid: Install Opsdroid Desktop at:
https://github.com/opsdroid/opsdroid-desktop/releases
INFO opsdroid: ========================================
WARNING opsdroid.loader: No databases in configuration. This will cause skills which store things in memory to lose data when opsdroid is restarted.
INFO opsdroid.web: Started web server on http://0.0.0.0:8080
INFO opsdroid.core: Opsdroid is now running, press ctrl+c to exit.
INFO aiohttp.access: 10.52.81.154 [21/Dec/2019:09:39:20 +0000] "POST /connector/websocket HTTP/1.1" 200 252 "-" "-"
ERROR opsdroid.core: Exception when running skill 'hello'.
Traceback (most recent call last):
File "c:\users\integrat\appdata\local\programs\python\python38-32\lib\site-packages\opsdroid\core.py", line 396, in run_skill
return await skill(event)
File "C:\Users\integrat\AppData\Local\opsdroid-modules\opsdroid\opsdroid-modules\skill\hello\__init__.py", line 13, in hello
await message.respond(text)
File "c:\users\integrat\appdata\local\programs\python\python38-32\lib\site-packages\opsdroid\events.py", line 263, in respond
"thinking-delay" in self.connector.configuration
AttributeError: 'NoneType' object has no attribute 'configuration'
ERROR aiohttp.server: Error handling request
Traceback (most recent call last):
File "c:\users\integrat\appdata\local\programs\python\python38-32\lib\site-packages\opsdroid\core.py", line 396, in run_skill
return await skill(event)
File "C:\Users\integrat\AppData\Local\opsdroid-modules\opsdroid\opsdroid-modules\skill\hello\__init__.py", line 13, in hello
await message.respond(text)
File "c:\users\integrat\appdata\local\programs\python\python38-32\lib\site-packages\opsdroid\events.py", line 263, in respond
"thinking-delay" in self.connector.configuration
AttributeError: 'NoneType' object has no attribute 'configuration'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "c:\users\integrat\appdata\local\programs\python\python38-32\lib\site-packages\aiohttp\web_protocol.py", line 418, in start
resp = await task
File "c:\users\integrat\appdata\local\programs\python\python38-32\lib\site-packages\aiohttp\web_app.py", line 458, in _handle
resp = await handler(request)
File "c:\users\integrat\appdata\local\programs\python\python38-32\lib\site-packages\opsdroid\connector\websocket\__init__.py", line 101, in websocket_handler
await self.opsdroid.parse(message)
File "c:\users\integrat\appdata\local\programs\python\python38-32\lib\site-packages\opsdroid\core.py", line 509, in parse
await asyncio.gather(*tasks)
File "c:\users\integrat\appdata\local\programs\python\python38-32\lib\site-packages\opsdroid\core.py", line 402, in run_skill
await event.respond(
File "c:\users\integrat\appdata\local\programs\python\python38-32\lib\site-packages\opsdroid\events.py", line 263, in respond
"thinking-delay" in self.connector.configuration
AttributeError: 'NoneType' object has no attribute 'configuration'
INFO aiohttp.access: 10.52.81.154 [21/Dec/2019:09:39:24 +0000] "POST /connector/websocket HTTP/1.1" 200 252 "-" "-"
`
|
AttributeError
|
async def slack_interactions_handler(self, request):
    """Handle interactive events in Slack.

    For each entry in request, it will check if the entry is one of the four main
    interaction types in slack: block_actions, message_actions, view_submissions
    and view_closed. Then it will process all the incoming messages.

    Return:
        A 200 OK response. The Messenger Platform will resend the webhook
        event every 20 seconds, until a 200 OK response is received.
        Failing to return a 200 OK may cause your webhook to be
        unsubscribed by the Messenger Platform.

    """
    req_data = await request.post()
    payload = json.loads(req_data["payload"])
    if "type" in payload:
        if payload["type"] == "block_actions":
            # One payload may carry several actions; each becomes its own
            # opsdroid event.
            for action in payload["actions"]:
                block_action = BlockActions(
                    payload,
                    user=payload["user"]["id"],
                    target=payload["channel"]["id"],
                    connector=self,
                )
                # Where the action's value lives depends on the action
                # type, so extract it per type rather than assuming a
                # top-level "value" key (only buttons have one).
                action_value = None
                if action["type"] == "button":
                    action_value = action["value"]
                elif action["type"] in ["overflow", "static_select"]:
                    action_value = action["selected_option"]["value"]
                elif action["type"] == "datepicker":
                    action_value = action["selected_date"]
                elif action["type"] == "multi_static_select":
                    # Multi-selects produce a list of selected values.
                    action_value = [v["value"] for v in action["selected_options"]]
                if action_value:
                    await block_action.update_entity("value", action_value)
                await self.opsdroid.parse(block_action)
        elif payload["type"] == "message_action":
            await self.opsdroid.parse(
                MessageAction(
                    payload,
                    user=payload["user"]["id"],
                    target=payload["channel"]["id"],
                    connector=self,
                )
            )
        elif payload["type"] == "view_submission":
            # View payloads carry no channel; target the user instead.
            await self.opsdroid.parse(
                ViewSubmission(
                    payload,
                    user=payload["user"]["id"],
                    target=payload["user"]["id"],
                    connector=self,
                )
            )
        elif payload["type"] == "view_closed":
            await self.opsdroid.parse(
                ViewClosed(
                    payload,
                    user=payload["user"]["id"],
                    target=payload["user"]["id"],
                    connector=self,
                )
            )
    # Always acknowledge with 200 so Slack does not retry or unsubscribe.
    return aiohttp.web.Response(text=json.dumps("Received"), status=200)
|
async def slack_interactions_handler(self, request):
    """Handle interactive events in Slack.

    For each entry in request, it will check if the entry is one of the four main
    interaction types in slack: block_actions, message_actions, view_submissions
    and view_closed. Then it will process all the incoming messages.

    Return:
        A 200 OK response. The Messenger Platform will resend the webhook
        event every 20 seconds, until a 200 OK response is received.
        Failing to return a 200 OK may cause your webhook to be
        unsubscribed by the Messenger Platform.

    """
    req_data = await request.post()
    payload = json.loads(req_data["payload"])
    if "type" in payload:
        if payload["type"] == "block_actions":
            for action in payload["actions"]:
                block_action = BlockActions(
                    payload,
                    user=payload["user"]["id"],
                    target=payload["channel"]["id"],
                    connector=self,
                )
                # Only button actions carry a top-level "value" key;
                # indexing action["value"] unconditionally raised KeyError
                # for overflow/select/datepicker actions. Extract the
                # value per action type instead.
                action_value = None
                if action["type"] == "button":
                    action_value = action["value"]
                elif action["type"] in ["overflow", "static_select"]:
                    action_value = action["selected_option"]["value"]
                elif action["type"] == "datepicker":
                    action_value = action["selected_date"]
                elif action["type"] == "multi_static_select":
                    action_value = [v["value"] for v in action["selected_options"]]
                if action_value:
                    await block_action.update_entity("value", action_value)
                await self.opsdroid.parse(block_action)
        elif payload["type"] == "message_action":
            await self.opsdroid.parse(
                MessageAction(
                    payload,
                    user=payload["user"]["id"],
                    target=payload["channel"]["id"],
                    connector=self,
                )
            )
        elif payload["type"] == "view_submission":
            await self.opsdroid.parse(
                ViewSubmission(
                    payload,
                    user=payload["user"]["id"],
                    target=payload["user"]["id"],
                    connector=self,
                )
            )
        elif payload["type"] == "view_closed":
            await self.opsdroid.parse(
                ViewClosed(
                    payload,
                    user=payload["user"]["id"],
                    target=payload["user"]["id"],
                    connector=self,
                )
            )
    return aiohttp.web.Response(text=json.dumps("Received"), status=200)
|
https://github.com/opsdroid/opsdroid/issues/1302
|
ERROR aiohttp.server: Error handling request
Traceback (most recent call last):
File "/path/to/venv/lib/python3.7/site-packages/aiohttp/web_protocol.py", line 418, in start
resp = await task
File "/path/to/venv/lib/python3.7/site-packages/aiohttp/web_app.py", line 458, in _handle
resp = await handler(request)
File "/path/to/venv/lib/python3.7/site-packages/opsdroid/connector/slack/__init__.py", line 268, in slack_interactions_handler
await block_action.update_entity("value", action["value"])
KeyError: 'value'
INFO aiohttp.access: 127.0.0.1 [14/Dec/2019:15:01:36 +0000] "POST /connector/slack/interactions HTTP/1.1" 500 244 "-" "Slackbot 1.0 (+https://api.slack.com/robots)"
|
KeyError
|
async def match_event(event, event_opts):
    """Filter and matches the event.

    Args:
        event: The opsdroid event to test.
        event_opts: Matcher options; "type" selects the event class (as a
            class or its registry name), every other key is compared
            against the event's entity values.

    Returns:
        bool: True when the event type and every requested entity match.

    Raises:
        ValueError: If "type" is a string not present in the registry.
    """
    event_type = event_opts.get("type", None)
    if event_type:
        # The event type can be specified with a string
        if isinstance(event_type, str):
            # pylint: disable=invalid-name
            et = Event.event_registry.get(event_type, None)
            if et is None:
                raise ValueError(
                    "{event_type} is not a valid opsdroid event representation.".format(
                        event_type=event_type
                    )
                )
            event_type = et
        # TODO: Add option to match all subclasses as well
        # if isinstance(event, event_type):
        # pylint: disable=unidiomatic-typecheck
        if type(event) is event_type:
            for key in event_opts:
                if key != "type":
                    event_value = event_opts.get(key, None)
                    entity_value = event.entities.get(key, {}).get("value", None)
                    if isinstance(event_value, list) and isinstance(
                        entity_value, list
                    ):
                        # List-valued entities compare order-insensitively.
                        # This must be exclusive with the plain != check
                        # below, otherwise equal-but-reordered lists would
                        # still fail the exact comparison.
                        if sorted(event_value) != sorted(entity_value):
                            return False
                    elif event_value != entity_value:
                        return False
            return True
    return False
|
async def match_event(event, event_opts):
    """Filter and matches the event."""
    wanted_type = event_opts.get("type", None)
    if not wanted_type:
        return False
    # The event type can be specified with a string naming a registered class.
    if isinstance(wanted_type, str):
        resolved = Event.event_registry.get(wanted_type, None)
        if resolved is None:
            raise ValueError(
                "{event_type} is not a valid opsdroid event representation.".format(
                    event_type=wanted_type
                )
            )
        wanted_type = resolved
    # TODO: Add option to match all subclasses as well
    # pylint: disable=unidiomatic-typecheck
    if type(event) is not wanted_type:
        return False
    # Every remaining option must equal the corresponding entity value.
    for key, expected in event_opts.items():
        if key == "type":
            continue
        actual = event.entities.get(key, {}).get("value", None)
        if expected != actual:
            return False
    return True
|
https://github.com/opsdroid/opsdroid/issues/1302
|
ERROR aiohttp.server: Error handling request
Traceback (most recent call last):
File "/path/to/venv/lib/python3.7/site-packages/aiohttp/web_protocol.py", line 418, in start
resp = await task
File "/path/to/venv/lib/python3.7/site-packages/aiohttp/web_app.py", line 458, in _handle
resp = await handler(request)
File "/path/to/venv/lib/python3.7/site-packages/opsdroid/connector/slack/__init__.py", line 268, in slack_interactions_handler
await block_action.update_entity("value", action["value"])
KeyError: 'value'
INFO aiohttp.access: 127.0.0.1 [14/Dec/2019:15:01:36 +0000] "POST /connector/slack/interactions HTTP/1.1" 500 244 "-" "Slackbot 1.0 (+https://api.slack.com/robots)"
|
KeyError
|
def __init__(self, config, opsdroid=None):
    """Create the connector.

    Args:
        config: Connector configuration mapping. Must contain
            ``api-token``; may override room, emoji, bot name and
            connect-timeout defaults.
        opsdroid: Reference to the running opsdroid instance.

    """
    super().__init__(config, opsdroid=opsdroid)
    _LOGGER.debug(_("Starting Slack connector"))
    self.name = "slack"
    self.default_target = config.get("default-room", "#general")
    self.icon_emoji = config.get("icon-emoji", ":robot_face:")
    self.token = config["api-token"]  # required; raises KeyError if absent
    self.timeout = config.get("connect-timeout", 10)
    # Use certifi's CA bundle so TLS verification works even on hosts
    # whose system certificate store is missing or incomplete.
    self.ssl_context = ssl.create_default_context(cafile=certifi.where())
    self.slack = slack.WebClient(token=self.token, run_async=True, ssl=self.ssl_context)
    self.slack_rtm = slack.RTMClient(
        token=self.token, run_async=True, ssl=self.ssl_context
    )
    self.websocket = None
    self.bot_name = config.get("bot-name", "opsdroid")
    self.auth_info = None
    self.user_info = None
    self.bot_id = None
    self.known_users = {}  # cache of looked-up user info keyed by user id
    self.keepalive = None
    self.reconnecting = False
    self.listening = True
    self._message_id = 0
    # Register callbacks
    slack.RTMClient.on(event="message", callback=self.process_message)
|
def __init__(self, config, opsdroid=None):
    """Create the connector.

    Args:
        config: Connector configuration mapping. Must contain
            ``api-token``; may override room, emoji, bot name and
            connect-timeout defaults.
        opsdroid: Reference to the running opsdroid instance.

    """
    super().__init__(config, opsdroid=opsdroid)
    _LOGGER.debug(_("Starting Slack connector"))
    self.name = "slack"
    self.default_target = config.get("default-room", "#general")
    self.icon_emoji = config.get("icon-emoji", ":robot_face:")
    self.token = config["api-token"]  # required; raises KeyError if absent
    self.timeout = config.get("connect-timeout", 10)
    # NOTE(review): no explicit SSL context is supplied, so the clients
    # rely on the default CA store; on hosts with an incomplete store
    # connections fail with CERTIFICATE_VERIFY_FAILED (opsdroid #1232).
    # Consider passing ssl=ssl.create_default_context(cafile=certifi.where()).
    self.slack = slack.WebClient(token=self.token, run_async=True)
    self.slack_rtm = slack.RTMClient(token=self.token, run_async=True)
    self.websocket = None
    self.bot_name = config.get("bot-name", "opsdroid")
    self.auth_info = None
    self.user_info = None
    self.bot_id = None
    self.known_users = {}  # cache of looked-up user info keyed by user id
    self.keepalive = None
    self.reconnecting = False
    self.listening = True
    self._message_id = 0
    # Register callbacks
    slack.RTMClient.on(event="message", callback=self.process_message)
|
https://github.com/opsdroid/opsdroid/issues/1232
|
INFO opsdroid.connector.slack: Connecting to Slack
Traceback (most recent call last):
File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py", line 936, in _wrap_create_connection
return await self._loop.create_connection(*args, **kwargs) # type: ignore # noqa
File "/usr/lib/python3.6/asyncio/base_events.py", line 820, in create_connection
sock, protocol_factory, ssl, server_hostname)
File "/usr/lib/python3.6/asyncio/base_events.py", line 846, in _create_connection_transport
yield from waiter
File "/usr/lib/python3.6/asyncio/sslproto.py", line 505, in data_received
ssldata, appdata = self._sslpipe.feed_ssldata(data)
File "/usr/lib/python3.6/asyncio/sslproto.py", line 201, in feed_ssldata
self._sslobj.do_handshake()
File "/usr/lib/python3.6/ssl.py", line 689, in do_handshake
self._sslobj.do_handshake()
ssl.SSLError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:852)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/opsdroid/connector/slack/__init__.py", line 55, in connect
self.auth_info = (await self.slack.api_call("auth.test")).data
File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/slack/web/base_client.py", line 229, in _send
http_verb=http_verb, api_url=api_url, req_args=req_args
File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/slack/web/base_client.py", line 259, in _request
async with session.request(http_verb, api_url, **req_args) as res:
File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/client.py", line 1012, in __aenter__
self._resp = await self._coro
File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/client.py", line 483, in _request
timeout=real_timeout
File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py", line 523, in connect
proto = await self._create_connection(req, traces, timeout)
File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py", line 859, in _create_connection
req, traces, timeout)
File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py", line 1004, in _create_direct_connection
raise last_exc
File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py", line 986, in _create_direct_connection
req=req, client_error=client_error)
File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py", line 941, in _wrap_create_connection
raise ClientConnectorSSLError(req.connection_key, exc) from exc
aiohttp.client_exceptions.ClientConnectorSSLError: Cannot connect to host www.slack.com:443 ssl:default [[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:852)]
|
ssl.SSLError
|
async def process_message(self, **payload):
    """Process a raw message and pass it to the parser.

    Ignores message edits and the bot's own messages, resolves the
    sender's user id to a username, replaces user ids mentioned in the
    text with usernames, and hands the result to opsdroid as a Message.
    """
    message = payload["data"]
    # Ignore message edits
    if "subtype" in message and message["subtype"] == "message_changed":
        return
    # Ignore own messages
    if (
        "subtype" in message
        and message["subtype"] == "bot_message"
        and message["bot_id"] == self.bot_id
    ):
        return
    # Some event subtypes carry no "user" key; skip them rather than
    # raising KeyError below (see opsdroid #1217).
    if "user" not in message:
        return
    # Lookup username
    _LOGGER.debug(_("Looking up sender username"))
    try:
        user_info = await self.lookup_username(message["user"])
    except ValueError:
        return
    # Replace usernames in the message
    _LOGGER.debug(_("Replacing userids in message with usernames"))
    message["text"] = await self.replace_usernames(message["text"])
    await self.opsdroid.parse(
        Message(
            message["text"],
            user_info["name"],
            message["channel"],
            self,
            raw_event=message,
        )
    )
|
async def process_message(self, **payload):
    """Process a raw message and pass it to the parser.

    Filters out the bot's own messages, resolves the sender's user id
    to a username, replaces user ids in the text with usernames, and
    hands the result to opsdroid as a Message.
    """
    message = payload["data"]
    # Ignore own messages
    if (
        "subtype" in message
        and message["subtype"] == "bot_message"
        and message["bot_id"] == self.bot_id
    ):
        return
    # Lookup username
    # NOTE(review): events without a "user" key (e.g. message_changed
    # edits) raise KeyError on the lookup below — see opsdroid #1217;
    # confirm a guard for such events is added upstream.
    _LOGGER.debug(_("Looking up sender username"))
    try:
        user_info = await self.lookup_username(message["user"])
    except ValueError:
        return
    # Replace usernames in the message
    _LOGGER.debug(_("Replacing userids in message with usernames"))
    message["text"] = await self.replace_usernames(message["text"])
    await self.opsdroid.parse(
        Message(
            message["text"],
            user_info["name"],
            message["channel"],
            self,
            raw_event=message,
        )
    )
|
https://github.com/opsdroid/opsdroid/issues/1217
|
ERROR slack.rtm.client: When calling '#process_message()' in the 'opsdroid.connector.slack' module the following error was raised: 'user'
Traceback (most recent call last):
File "C:\Users\koe4945\desktop\git\personal\ops\venv\Scripts\opsdroid-script.py", line 11, in <module>
load_entry_point('opsdroid', 'console_scripts', 'opsdroid')()
File "c:\users\koe4945\desktop\git\personal\ops\venv\lib\site-packages\click\core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "c:\users\koe4945\desktop\git\personal\ops\venv\lib\site-packages\click\core.py", line 717, in main
rv = self.invoke(ctx)
File "c:\users\koe4945\desktop\git\personal\ops\venv\lib\site-packages\click\core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "c:\users\koe4945\desktop\git\personal\ops\venv\lib\site-packages\click\core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "c:\users\koe4945\desktop\git\personal\ops\venv\lib\site-packages\click\core.py", line 555, in invoke
return callback(*args, **kwargs)
File "c:\users\koe4945\desktop\git\personal\ops\opsdroid\opsdroid\cli\start.py", line 31, in start
opsdroid.run()
File "c:\users\koe4945\desktop\git\personal\ops\opsdroid\opsdroid\core.py", line 161, in run
self.eventloop.run_until_complete(asyncio.gather(*pending))
File "C:\Users\koe4945\AppData\Local\Programs\Python\Python37\lib\asyncio\base_events.py", line 584, in run_until_complete
return future.result()
File "c:\users\koe4945\desktop\git\personal\ops\python-slackclient\slack\rtm\client.py", line 339, in _connect_and_read
await self._read_messages()
File "c:\users\koe4945\desktop\git\personal\ops\python-slackclient\slack\rtm\client.py", line 390, in _read_messages
await self._dispatch_event(event, data=payload)
File "c:\users\koe4945\desktop\git\personal\ops\python-slackclient\slack\rtm\client.py", line 437, in _dispatch_event
rtm_client=self, web_client=self._web_client, data=data
File "c:\users\koe4945\desktop\git\personal\ops\opsdroid\opsdroid\connector\slack\__init__.py", line 104, in process_message
user_info = await self.lookup_username(message["user"])
KeyError: 'user'
ERROR: Unhandled exception in opsdroid, exiting...
|
KeyError
|
async def train_rasanlu(config, skills):
    """Train a Rasa NLU model based on the loaded skills.

    Returns:
        bool: True when a matching model already exists or training
        succeeded; False when no intents were found, the connection
        failed, or Rasa NLU returned an unexpected response.

    """
    _LOGGER.info(_("Starting Rasa NLU training."))
    intents = await _get_all_intents(skills)
    if intents is None:
        _LOGGER.warning(_("No intents found, skipping training."))
        return False
    # The model name is a fingerprint of the intents, so an unchanged
    # skill set can reuse an already-trained model.
    config["model"] = await _get_intents_fingerprint(intents)
    if config["model"] in await _get_existing_models(config):
        _LOGGER.info(_("This model already exists, skipping training..."))
        await _init_model(config)
        return True
    async with aiohttp.ClientSession() as session:
        _LOGGER.info(_("Now training the model. This may take a while..."))
        url = await _build_training_url(config)
        # https://github.com/RasaHQ/rasa_nlu/blob/master/docs/http.rst#post-train
        # Note : The request should always be sent as
        # application/x-yml regardless of wether you use
        # json or md for the data format. Do not send json as
        # application/json for example.+
        headers = {"content-type": "application/x-yml"}
        try:
            training_start = arrow.now()
            resp = await session.post(url, data=intents, headers=headers)
        except aiohttp.client_exceptions.ClientConnectorError:
            _LOGGER.error(_("Unable to connect to Rasa NLU, training failed."))
            return False
        if resp.status == 200:
            # Older Rasa servers answer with a JSON status body.
            if resp.content_type == "application/json":
                result = await resp.json()
                if "info" in result and "new model trained" in result["info"]:
                    time_taken = (arrow.now() - training_start).total_seconds()
                    _LOGGER.info(
                        _("Rasa NLU training completed in %s seconds."), int(time_taken)
                    )
                    await _init_model(config)
                    return True
                _LOGGER.debug(result)
            # Newer Rasa servers stream the trained model back as a zip
            # attachment instead of a JSON body.
            if (
                resp.content_type == "application/zip"
                and resp.content_disposition.type == "attachment"
            ):
                time_taken = (arrow.now() - training_start).total_seconds()
                _LOGGER.info(
                    _("Rasa NLU training completed in %s seconds."), int(time_taken)
                )
                await _init_model(config)
                """
                As inditated in the issue #886, returned zip file is ignored, this can be changed
                This can be changed in future release if needed
                Saving model.zip file example :
                try:
                    output_file = open("/target/directory/model.zip","wb")
                    data = await resp.read()
                    output_file.write(data)
                    output_file.close()
                    _LOGGER.debug("Rasa taining model file saved to /target/directory/model.zip")
                except:
                    _LOGGER.error("Cannot save rasa taining model file to /target/directory/model.zip")
                """
                return True
        _LOGGER.error(_("Bad Rasa NLU response - %s"), await resp.text())
        _LOGGER.error(_("Rasa NLU training failed."))
        return False
|
async def train_rasanlu(config, skills):
    """Train a Rasa NLU model based on the loaded skills.

    Returns:
        bool: True when a matching model already exists or training
        succeeded; False when no intents were found, the connection
        failed, or Rasa NLU returned an unexpected response.

    """
    _LOGGER.info(_("Starting Rasa NLU training."))
    intents = await _get_all_intents(skills)
    if intents is None:
        _LOGGER.warning(_("No intents found, skipping training."))
        return False
    # The model name is a fingerprint of the intents, so an unchanged
    # skill set can reuse an already-trained model.
    config["model"] = await _get_intents_fingerprint(intents)
    if config["model"] in await _get_existing_models(config):
        _LOGGER.info(_("This model already exists, skipping training..."))
        await _init_model(config)
        return True
    async with aiohttp.ClientSession() as session:
        _LOGGER.info(_("Now training the model. This may take a while..."))
        url = await _build_training_url(config)
        # https://github.com/RasaHQ/rasa_nlu/blob/master/docs/http.rst#post-train
        # Rasa expects the training payload as application/x-yml
        # regardless of whether the data itself is json or markdown.
        headers = {"content-type": "application/x-yml"}
        try:
            training_start = arrow.now()
            resp = await session.post(url, data=intents, headers=headers)
        except aiohttp.client_exceptions.ClientConnectorError:
            _LOGGER.error(_("Unable to connect to Rasa NLU, training failed."))
            return False
        if resp.status == 200:
            # Older Rasa servers reply with JSON; newer ones stream the
            # trained model back as a zip attachment. Calling resp.json()
            # unconditionally raises aiohttp ContentTypeError on the zip
            # response (see opsdroid #886), so branch on content type.
            if resp.content_type == "application/json":
                result = await resp.json()
                if "info" in result and "new model trained" in result["info"]:
                    time_taken = (arrow.now() - training_start).total_seconds()
                    _LOGGER.info(
                        _("Rasa NLU training completed in %s seconds."), int(time_taken)
                    )
                    await _init_model(config)
                    return True
                _LOGGER.debug(result)
            if (
                resp.content_type == "application/zip"
                and resp.content_disposition.type == "attachment"
            ):
                time_taken = (arrow.now() - training_start).total_seconds()
                _LOGGER.info(
                    _("Rasa NLU training completed in %s seconds."), int(time_taken)
                )
                await _init_model(config)
                # The returned zip (the trained model) is intentionally
                # ignored here; it could be written to disk if needed.
                return True
        _LOGGER.error(_("Bad Rasa NLU response - %s"), await resp.text())
        _LOGGER.error(_("Rasa NLU training failed."))
        return False
|
https://github.com/opsdroid/opsdroid/issues/886
|
INFO opsdroid.parsers.rasanlu: Starting Rasa NLU training.
INFO opsdroid.parsers.rasanlu: Now training the model. This may take a while...
DEBUG asyncio: Using selector: KqueueSelector
Traceback (most recent call last):
...
File "/Users/hicham/Developments/Bots/opsdroid-iobreaker/opsdroid/opsdroid/parsers/rasanlu.py", line 117, in train_rasanlu
result = await resp.json()
File "/Users/hicham/.virtualenvs/opsdroid/lib/python3.7/site-packages/aiohttp/client_reqrep.py", line 1027, in json
headers=self.headers)
aiohttp.client_exceptions.ContentTypeError: 0, message='Attempt to decode JSON with unexpected mimetype: application/zip'
|
aiohttp.client_exceptions.ContentTypeError
|
def _load_intents(config):
intent_file = os.path.join(config["install_path"], "intents.yml")
if os.path.isfile(intent_file):
with open(intent_file, "r") as intent_file_handle:
intents = intent_file_handle.read()
return intents
else:
return None
|
def _load_intents(config):
intent_file = os.path.join(config["install_path"], "intents.md")
if os.path.isfile(intent_file):
with open(intent_file, "r") as intent_file_handle:
intents = intent_file_handle.read()
return intents
else:
return None
|
https://github.com/opsdroid/opsdroid/issues/860
|
INFO opsdroid.parsers.rasanlu: Starting Rasa NLU training.
DEBUG asyncio: Using selector: KqueueSelector
Traceback (most recent call last):
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/Users/IOBreaker/Developments/Bots/opsdroid/opsdroid/__main__.py", line 206, in <module>
init()
File "/Users/IOBreaker/Developments/Bots/opsdroid/opsdroid/__main__.py", line 203, in init
main()
File "/Users/IOBreaker/Developments/Bots/opsdroid/venv/lib/python3.7/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/Users/IOBreaker/Developments/Bots/opsdroid/venv/lib/python3.7/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/Users/IOBreaker/Developments/Bots/opsdroid/venv/lib/python3.7/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/IOBreaker/Developments/Bots/opsdroid/venv/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/Users/IOBreaker/Developments/Bots/opsdroid/opsdroid/__main__.py", line 196, in main
opsdroid.load()
File "/Users/IOBreaker/Developments/Bots/opsdroid/opsdroid/core.py", line 152, in load
self.train_parsers(self.modules["skills"])
File "/Users/IOBreaker/Developments/Bots/opsdroid/opsdroid/core.py", line 270, in train_parsers
asyncio.gather(*tasks, loop=self.eventloop))
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/asyncio/base_events.py", line 568, in run_until_complete
return future.result()
File "/Users/IOBreaker/Developments/Bots/opsdroid/opsdroid/parsers/rasanlu.py", line 87, in train_rasanlu
intents = await _get_all_intents(skills)
File "/Users/IOBreaker/Developments/Bots/opsdroid/opsdroid/parsers/rasanlu.py", line 19, in _get_all_intents
matchers = [matcher for skill in skills for matcher in skill.matchers]
File "/Users/IOBreaker/Developments/Bots/opsdroid/opsdroid/parsers/rasanlu.py", line 19, in <listcomp>
matchers = [matcher for skill in skills for matcher in skill.matchers]
AttributeError: 'dict' object has no attribute 'matchers'
|
AttributeError
|
async def _get_all_intents(skills):
"""Get all skill intents and concatenate into a single markdown string."""
intents = [skill["intents"] for skill in skills if skill["intents"] is not None]
if not intents:
return None
intents = "\n\n".join(intents)
return unicodedata.normalize("NFKD", intents).encode("ascii")
|
async def _get_all_intents(skills):
"""Get all skill intents and concatenate into a single markdown string."""
matchers = [matcher for skill in skills for matcher in skill.matchers]
intents = [
matcher["intents"] for matcher in matchers if matcher["intents"] is not None
]
if not intents:
return None
intents = "\n\n".join(intents)
return unicodedata.normalize("NFKD", intents).encode("ascii")
|
https://github.com/opsdroid/opsdroid/issues/860
|
INFO opsdroid.parsers.rasanlu: Starting Rasa NLU training.
DEBUG asyncio: Using selector: KqueueSelector
Traceback (most recent call last):
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/Users/IOBreaker/Developments/Bots/opsdroid/opsdroid/__main__.py", line 206, in <module>
init()
File "/Users/IOBreaker/Developments/Bots/opsdroid/opsdroid/__main__.py", line 203, in init
main()
File "/Users/IOBreaker/Developments/Bots/opsdroid/venv/lib/python3.7/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/Users/IOBreaker/Developments/Bots/opsdroid/venv/lib/python3.7/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/Users/IOBreaker/Developments/Bots/opsdroid/venv/lib/python3.7/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/IOBreaker/Developments/Bots/opsdroid/venv/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/Users/IOBreaker/Developments/Bots/opsdroid/opsdroid/__main__.py", line 196, in main
opsdroid.load()
File "/Users/IOBreaker/Developments/Bots/opsdroid/opsdroid/core.py", line 152, in load
self.train_parsers(self.modules["skills"])
File "/Users/IOBreaker/Developments/Bots/opsdroid/opsdroid/core.py", line 270, in train_parsers
asyncio.gather(*tasks, loop=self.eventloop))
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/asyncio/base_events.py", line 568, in run_until_complete
return future.result()
File "/Users/IOBreaker/Developments/Bots/opsdroid/opsdroid/parsers/rasanlu.py", line 87, in train_rasanlu
intents = await _get_all_intents(skills)
File "/Users/IOBreaker/Developments/Bots/opsdroid/opsdroid/parsers/rasanlu.py", line 19, in _get_all_intents
matchers = [matcher for skill in skills for matcher in skill.matchers]
File "/Users/IOBreaker/Developments/Bots/opsdroid/opsdroid/parsers/rasanlu.py", line 19, in <listcomp>
matchers = [matcher for skill in skills for matcher in skill.matchers]
AttributeError: 'dict' object has no attribute 'matchers'
|
AttributeError
|
async def train_rasanlu(config, skills):
    """Train a Rasa NLU model based on the loaded skills.

    Returns:
        bool: True when a matching model already exists or training
        succeeded; False otherwise.

    """
    _LOGGER.info(_("Starting Rasa NLU training."))
    intents = await _get_all_intents(skills)
    if intents is None:
        _LOGGER.warning(_("No intents found, skipping training."))
        return False
    # The model name is a fingerprint of the intents, so an unchanged
    # skill set can reuse an already-trained model.
    config["model"] = await _get_intents_fingerprint(intents)
    if config["model"] in await _get_existing_models(config):
        _LOGGER.info(_("This model already exists, skipping training..."))
        await _init_model(config)
        return True
    async with aiohttp.ClientSession() as session:
        _LOGGER.info(_("Now training the model. This may take a while..."))
        url = await _build_training_url(config)
        # https://github.com/RasaHQ/rasa_nlu/blob/master/docs/http.rst#post-train
        # Note : The request should always be sent as
        # application/x-yml regardless of wether you use
        # json or md for the data format. Do not send json as
        # application/json for example.+
        headers = {"content-type": "application/x-yml"}
        try:
            training_start = arrow.now()
            resp = await session.post(url, data=intents, headers=headers)
        except aiohttp.client_exceptions.ClientConnectorError:
            _LOGGER.error(_("Unable to connect to Rasa NLU, training failed."))
            return False
        if resp.status == 200:
            # NOTE(review): resp.json() assumes a JSON body; Rasa servers
            # that return the trained model as an application/zip
            # attachment make this raise aiohttp ContentTypeError —
            # confirm the targeted Rasa version always replies with JSON.
            result = await resp.json()
            if "info" in result and "new model trained" in result["info"]:
                time_taken = (arrow.now() - training_start).total_seconds()
                _LOGGER.info(
                    _("Rasa NLU training completed in %s seconds."), int(time_taken)
                )
                await _init_model(config)
                return True
            _LOGGER.debug(result)
        _LOGGER.error(_("Bad Rasa NLU response - %s"), await resp.text())
        _LOGGER.error(_("Rasa NLU training failed."))
        return False
|
async def train_rasanlu(config, skills):
    """Train a Rasa NLU model based on the loaded skills.

    Returns:
        bool: True when a matching model already exists or training
        succeeded; False otherwise.

    """
    _LOGGER.info(_("Starting Rasa NLU training."))
    intents = await _get_all_intents(skills)
    if intents is None:
        _LOGGER.warning(_("No intents found, skipping training."))
        return False
    # The model name is a fingerprint of the intents, so an unchanged
    # skill set can reuse an already-trained model.
    config["model"] = await _get_intents_fingerprint(intents)
    if config["model"] in await _get_existing_models(config):
        _LOGGER.info(_("This model already exists, skipping training..."))
        await _init_model(config)
        return True
    async with aiohttp.ClientSession() as session:
        _LOGGER.info(_("Now training the model. This may take a while..."))
        url = await _build_training_url(config)
        # https://github.com/RasaHQ/rasa_nlu/blob/master/docs/http.rst#post-train
        # Rasa requires the training payload to be sent as
        # application/x-yml regardless of whether the data is json or md;
        # posting without this header makes the server misparse the data.
        headers = {"content-type": "application/x-yml"}
        try:
            training_start = arrow.now()
            resp = await session.post(url, data=intents, headers=headers)
        except aiohttp.client_exceptions.ClientConnectorError:
            _LOGGER.error(_("Unable to connect to Rasa NLU, training failed."))
            return False
        if resp.status == 200:
            result = await resp.json()
            if "info" in result and "new model trained" in result["info"]:
                time_taken = (arrow.now() - training_start).total_seconds()
                _LOGGER.info(
                    _("Rasa NLU training completed in %s seconds."), int(time_taken)
                )
                await _init_model(config)
                return True
            _LOGGER.debug(result)
        _LOGGER.error(_("Bad Rasa NLU response - %s"), await resp.text())
        _LOGGER.error(_("Rasa NLU training failed."))
        return False
|
https://github.com/opsdroid/opsdroid/issues/860
|
INFO opsdroid.parsers.rasanlu: Starting Rasa NLU training.
DEBUG asyncio: Using selector: KqueueSelector
Traceback (most recent call last):
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/Users/IOBreaker/Developments/Bots/opsdroid/opsdroid/__main__.py", line 206, in <module>
init()
File "/Users/IOBreaker/Developments/Bots/opsdroid/opsdroid/__main__.py", line 203, in init
main()
File "/Users/IOBreaker/Developments/Bots/opsdroid/venv/lib/python3.7/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/Users/IOBreaker/Developments/Bots/opsdroid/venv/lib/python3.7/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/Users/IOBreaker/Developments/Bots/opsdroid/venv/lib/python3.7/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/IOBreaker/Developments/Bots/opsdroid/venv/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/Users/IOBreaker/Developments/Bots/opsdroid/opsdroid/__main__.py", line 196, in main
opsdroid.load()
File "/Users/IOBreaker/Developments/Bots/opsdroid/opsdroid/core.py", line 152, in load
self.train_parsers(self.modules["skills"])
File "/Users/IOBreaker/Developments/Bots/opsdroid/opsdroid/core.py", line 270, in train_parsers
asyncio.gather(*tasks, loop=self.eventloop))
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/asyncio/base_events.py", line 568, in run_until_complete
return future.result()
File "/Users/IOBreaker/Developments/Bots/opsdroid/opsdroid/parsers/rasanlu.py", line 87, in train_rasanlu
intents = await _get_all_intents(skills)
File "/Users/IOBreaker/Developments/Bots/opsdroid/opsdroid/parsers/rasanlu.py", line 19, in _get_all_intents
matchers = [matcher for skill in skills for matcher in skill.matchers]
File "/Users/IOBreaker/Developments/Bots/opsdroid/opsdroid/parsers/rasanlu.py", line 19, in <listcomp>
matchers = [matcher for skill in skills for matcher in skill.matchers]
AttributeError: 'dict' object has no attribute 'matchers'
|
AttributeError
|
async def parse(self, message):
    """Parse a string against all skills.

    Args:
        message: The incoming message (or None) to match against skills.

    Returns:
        list: asyncio tasks — one for the "always" parsers and, when a
        skill matched, one running the single highest-ranked skill.
    """
    self.stats["messages_parsed"] = self.stats["messages_parsed"] + 1
    tasks = []
    if message is not None:
        # Skip empty / whitespace-only messages.
        if str(message.text).strip():
            _LOGGER.debug(_("Parsing input: %s"), message.text)
            tasks.append(self.eventloop.create_task(parse_always(self, message)))
            unconstrained_skills = await self._constrain_skills(self.skills, message)
            ranked_skills = await self.get_ranked_skills(unconstrained_skills, message)
            if ranked_skills:
                # Run only the top-ranked skill, and hand it the message
                # the matcher stored (which may carry parser match data).
                tasks.append(
                    self.eventloop.create_task(
                        self.run_skill(
                            ranked_skills[0]["skill"],
                            ranked_skills[0]["config"],
                            ranked_skills[0]["message"],
                        )
                    )
                )
    return tasks
|
async def parse(self, message):
    """Parse a string against all skills.

    Args:
        message: The incoming message (or None) to match against skills.

    Returns:
        list: asyncio tasks — one for the "always" parsers and, when a
        skill matched, one running the single highest-ranked skill.
    """
    self.stats["messages_parsed"] = self.stats["messages_parsed"] + 1
    tasks = []
    if message is not None:
        # Skip empty / whitespace-only messages.
        if str(message.text).strip():
            _LOGGER.debug(_("Parsing input: %s"), message.text)
            tasks.append(self.eventloop.create_task(parse_always(self, message)))
            unconstrained_skills = await self._constrain_skills(self.skills, message)
            ranked_skills = await self.get_ranked_skills(unconstrained_skills, message)
            if ranked_skills:
                tasks.append(
                    self.eventloop.create_task(
                        self.run_skill(
                            ranked_skills[0]["skill"],
                            ranked_skills[0]["config"],
                            # Use the matcher's stored message, not the raw
                            # input: parsers attach match data (e.g. .regex)
                            # to their own copy, and dropping it breaks
                            # skills reading message.regex.group(...)
                            # (opsdroid #872, "IndexError: no such group").
                            ranked_skills[0]["message"],
                        )
                    )
                )
    return tasks
|
https://github.com/opsdroid/opsdroid/issues/872
|
INFO opsdroid.web: Started web server on http://127.0.0.1:8080
DEBUG opsdroid.parsers.crontab: Running crontab skills
DEBUG opsdroid.connector.telegram: {'update_id': 246644579, 'message': {'message_id': 280, 'from': {'id': 111222555, 'is_bot': False, 'first_name': 'IOBreaker', 'language_code': 'en'}, 'chat': {'id': 111222555, 'first_name': 'IOBreaker', 'type': 'private'}, 'date': 1552161920, 'text': 'welcome hicham to paris'}}
DEBUG opsdroid.core: Parsing input: welcome hicham to paris
DEBUG opsdroid.core: Processing parsers...
DEBUG opsdroid-modules.skill.welcome: =========> triggering welcome skill
DEBUG opsdroid.connector.telegram: Responding with: Whoops there has been an error
DEBUG opsdroid.connector.telegram: Successfully responded
DEBUG opsdroid.connector.telegram: Responding with: Check the log for details
DEBUG opsdroid.connector.telegram: Successfully responded
ERROR opsdroid.core: Exception when running skill 'welcome'
Traceback (most recent call last):
File "/Users/hicham/Developments/Bots/opsdroid-iobreaker/opsdroid/opsdroid/core.py", line 336, in run_skill
await skill(self, config, message)
File "/Users/hicham/Library/Application Support/opsdroid/opsdroid-modules/skill/welcome/__init__.py", line 12, in welcome
_LOGGER.debug("=========> group('name') = {}".format(message.regex.group("name")))
IndexError: no such group
|
IndexError
|
async def parse_regex(opsdroid, skills, message):
    """Parse a message against all regex skills."""
    matched_skills = []
    for skill in skills:
        for matcher in skill.matchers:
            if "regex" not in matcher:
                continue
            opts = matcher["regex"]
            # Case-insensitive matching unless the skill asked otherwise.
            flags = 0 if opts["case_sensitive"] else re.IGNORECASE
            found = re.search(opts["expression"], message.text, flags)
            if not found:
                continue
            # Attach the match object to a copy so other skills see the
            # original message untouched.
            msg_copy = copy.copy(message)
            msg_copy.regex = found
            score = await calculate_score(opts["expression"], opts["score_factor"])
            matched_skills.append(
                {
                    "score": score,
                    "skill": skill,
                    "config": skill.config,
                    "message": msg_copy,
                }
            )
    return matched_skills
|
async def parse_regex(opsdroid, skills, message):
    """Parse a message against all regex skills.

    Returns:
        list: matched-skill dicts (score, skill, config, message).
    """
    # Local import keeps this fix self-contained; copy is stdlib.
    import copy

    matched_skills = []
    for skill in skills:
        for matcher in skill.matchers:
            if "regex" in matcher:
                opts = matcher["regex"]
                if opts["case_sensitive"]:
                    regex = re.search(opts["expression"], message.text)
                else:
                    regex = re.search(opts["expression"], message.text, re.IGNORECASE)
                if regex:
                    # Attach the match object to a copy of the message:
                    # mutating the shared message would leak one matcher's
                    # match into other skills' handlers (opsdroid #872).
                    new_message = copy.copy(message)
                    new_message.regex = regex
                    matched_skills.append(
                        {
                            "score": await calculate_score(
                                opts["expression"], opts["score_factor"]
                            ),
                            "skill": skill,
                            "config": skill.config,
                            "message": new_message,
                        }
                    )
    return matched_skills
|
https://github.com/opsdroid/opsdroid/issues/872
|
INFO opsdroid.web: Started web server on http://127.0.0.1:8080
DEBUG opsdroid.parsers.crontab: Running crontab skills
DEBUG opsdroid.connector.telegram: {'update_id': 246644579, 'message': {'message_id': 280, 'from': {'id': 111222555, 'is_bot': False, 'first_name': 'IOBreaker', 'language_code': 'en'}, 'chat': {'id': 111222555, 'first_name': 'IOBreaker', 'type': 'private'}, 'date': 1552161920, 'text': 'welcome hicham to paris'}}
DEBUG opsdroid.core: Parsing input: welcome hicham to paris
DEBUG opsdroid.core: Processing parsers...
DEBUG opsdroid-modules.skill.welcome: =========> triggering welcome skill
DEBUG opsdroid.connector.telegram: Responding with: Whoops there has been an error
DEBUG opsdroid.connector.telegram: Successfully responded
DEBUG opsdroid.connector.telegram: Responding with: Check the log for details
DEBUG opsdroid.connector.telegram: Successfully responded
ERROR opsdroid.core: Exception when running skill 'welcome'
Traceback (most recent call last):
File "/Users/hicham/Developments/Bots/opsdroid-iobreaker/opsdroid/opsdroid/core.py", line 336, in run_skill
await skill(self, config, message)
File "/Users/hicham/Library/Application Support/opsdroid/opsdroid-modules/skill/welcome/__init__.py", line 12, in welcome
_LOGGER.debug("=========> group('name') = {}".format(message.regex.group("name")))
IndexError: no such group
|
IndexError
|
async def websocket_handler(self, request):
    """Handle for aiohttp handling websocket connections.

    Upgrades a previously requested socket id to a live websocket,
    feeds every received text frame into opsdroid's parser, and cleans
    up the connection registry on close.
    """
    socket = request.match_info.get("socket")
    # A client must have requested a socket id first; reject unknown ids.
    available = [item for item in self.available_connections if item["id"] == socket]
    if len(available) != 1:
        return aiohttp.web.Response(
            text=json.dumps("Please request a socket first"), status=400
        )
    # Reservations expire after self.connection_timeout seconds.
    if (
        datetime.now() - available[0]["date"]
    ).total_seconds() > self.connection_timeout:
        self.available_connections.remove(available[0])
        return aiohttp.web.Response(
            text=json.dumps("Socket request timed out"), status=408
        )
    # Consume the one-shot reservation before upgrading to a websocket.
    self.available_connections.remove(available[0])
    _LOGGER.debug("User connected to %s", socket)
    websocket = aiohttp.web.WebSocketResponse()
    await websocket.prepare(request)
    self.active_connections[socket] = websocket
    # Blocks here for the lifetime of the websocket connection.
    async for msg in websocket:
        if msg.type == aiohttp.WSMsgType.TEXT:
            # First positional argument appears to be the message text
            # (cf. issue #867) -- confirm against Message's signature.
            message = Message(msg.data, None, None, self)
            await self.opsdroid.parse(message)
        elif msg.type == aiohttp.WSMsgType.ERROR:
            _LOGGER.error(
                "Websocket connection closed with exception %s", websocket.exception()
            )
    _LOGGER.info("websocket connection closed")
    self.active_connections.pop(socket, None)
    return websocket
|
async def websocket_handler(self, request):
    """Handle for aiohttp handling websocket connections.

    Upgrades a previously requested socket id to a live websocket and
    feeds every received text frame into opsdroid's parser.
    """
    socket = request.match_info.get("socket")
    available = [item for item in self.available_connections if item["id"] == socket]
    if len(available) != 1:
        return aiohttp.web.Response(
            text=json.dumps("Please request a socket first"), status=400
        )
    if (
        datetime.now() - available[0]["date"]
    ).total_seconds() > self.connection_timeout:
        self.available_connections.remove(available[0])
        return aiohttp.web.Response(
            text=json.dumps("Socket request timed out"), status=408
        )
    self.available_connections.remove(available[0])
    _LOGGER.debug("User connected to %s", socket)
    websocket = aiohttp.web.WebSocketResponse()
    await websocket.prepare(request)
    self.active_connections[socket] = websocket
    async for msg in websocket:
        if msg.type == aiohttp.WSMsgType.TEXT:
            # Bug fix (issue #867): arguments were previously passed as
            # Message(None, socket, self, msg.data), leaving message.text
            # set to None and crashing opsdroid.parse() with
            # AttributeError on .strip(). The message text must be the
            # first positional argument.
            message = Message(msg.data, None, None, self)
            await self.opsdroid.parse(message)
        elif msg.type == aiohttp.WSMsgType.ERROR:
            _LOGGER.error(
                "Websocket connection closed with exception %s", websocket.exception()
            )
    _LOGGER.info("websocket connection closed")
    self.active_connections.pop(socket, None)
    return websocket
|
https://github.com/opsdroid/opsdroid/issues/867
|
DEBUG opsdroid.connector.websocket: Starting Websocket connector
INFO opsdroid.core: Opsdroid is now running, press ctrl+c to exit.
INFO opsdroid.web: Started web server on http://127.0.0.1:8080
INFO aiohttp.access: 127.0.0.1 [07/Mar/2019:14:35:22 +0000] "POST /connector/websocket HTTP/1.1" 200 220 "-" "-"
DEBUG opsdroid.connector.websocket: User connected to 3d406d62-40e6-11e9-bd4a-4865ee166460
ERROR aiohttp.server: Error handling request
Traceback (most recent call last):
File "/usr/lib/python3.7/site-packages/aiohttp/web_protocol.py", line 418, in start
resp = await task
File "/usr/lib/python3.7/site-packages/aiohttp/web_app.py", line 458, in _handle
resp = await handler(request)
File "/home/hicham/Developments/Bots/opsdroid/opsdroid-master/opsdroid/connector/websocket/__init__.py", line 89, in websocket_handler
await self.opsdroid.parse(message)
File "/home/hicham/Developments/Bots/opsdroid/opsdroid-master/opsdroid/core.py", line 435, in parse
if message is not None and message.text.strip() != "":
AttributeError: 'NoneType' object has no attribute 'strip'
ERROR aiohttp.server: Unhandled exception
|
AttributeError
|
async def parse(self, message):
    """Parse a string against all skills.

    Returns the list of asyncio tasks scheduled for the message
    (empty when there is nothing worth parsing).
    """
    self.stats["messages_parsed"] += 1
    tasks = []
    # Nothing to do for missing messages or blank/whitespace-only text.
    if message is None or not str(message.text).strip():
        return tasks
    _LOGGER.debug(_("Parsing input: %s"), message.text)
    tasks.append(self.eventloop.create_task(parse_always(self, message)))
    candidates = await self._constrain_skills(self.skills, message)
    ranked = await self.get_ranked_skills(candidates, message)
    if ranked:
        best = ranked[0]
        tasks.append(
            self.eventloop.create_task(
                self.run_skill(best["skill"], best["config"], message)
            )
        )
    return tasks
|
async def parse(self, message):
    """Parse a string against all skills.

    Args:
        message: a Message object (or None) to be matched against the
            loaded skills.

    Returns:
        list: asyncio tasks scheduled for the matched skills (empty
        when there is nothing to parse).
    """
    self.stats["messages_parsed"] = self.stats["messages_parsed"] + 1
    tasks = []
    # Bug fix (issue #867): message.text can legitimately be None (e.g.
    # a mis-constructed connector message), which previously crashed
    # with AttributeError on .strip(). Check for None explicitly before
    # touching the string.
    if message is not None and message.text is not None and message.text.strip() != "":
        _LOGGER.debug(_("Parsing input: %s"), message.text)
        tasks.append(self.eventloop.create_task(parse_always(self, message)))
        unconstrained_skills = await self._constrain_skills(self.skills, message)
        ranked_skills = await self.get_ranked_skills(unconstrained_skills, message)
        if ranked_skills:
            # Only the best-ranked skill is executed.
            tasks.append(
                self.eventloop.create_task(
                    self.run_skill(
                        ranked_skills[0]["skill"], ranked_skills[0]["config"], message
                    )
                )
            )
    return tasks
|
https://github.com/opsdroid/opsdroid/issues/867
|
DEBUG opsdroid.connector.websocket: Starting Websocket connector
INFO opsdroid.core: Opsdroid is now running, press ctrl+c to exit.
INFO opsdroid.web: Started web server on http://127.0.0.1:8080
INFO aiohttp.access: 127.0.0.1 [07/Mar/2019:14:35:22 +0000] "POST /connector/websocket HTTP/1.1" 200 220 "-" "-"
DEBUG opsdroid.connector.websocket: User connected to 3d406d62-40e6-11e9-bd4a-4865ee166460
ERROR aiohttp.server: Error handling request
Traceback (most recent call last):
File "/usr/lib/python3.7/site-packages/aiohttp/web_protocol.py", line 418, in start
resp = await task
File "/usr/lib/python3.7/site-packages/aiohttp/web_app.py", line 458, in _handle
resp = await handler(request)
File "/home/hicham/Developments/Bots/opsdroid/opsdroid-master/opsdroid/connector/websocket/__init__.py", line 89, in websocket_handler
await self.opsdroid.parse(message)
File "/home/hicham/Developments/Bots/opsdroid/opsdroid-master/opsdroid/core.py", line 435, in parse
if message is not None and message.text.strip() != "":
AttributeError: 'NoneType' object has no attribute 'strip'
ERROR aiohttp.server: Unhandled exception
|
AttributeError
|
def __init__(self, config, opsdroid=None):
    """Create the connector.

    Args:
        config (dict): configuration settings from the
            file config.yaml.
    """
    _LOGGER.debug("Loaded telegram connector")
    super().__init__(config, opsdroid=opsdroid)
    self.name = "telegram"
    self.opsdroid = opsdroid
    # Polling / listening state.
    self.latest_update = None
    self.default_room = None
    self.listening = True
    self.update_interval = config.get("update_interval", 1)
    # User-facing configuration.
    self.default_user = config.get("default-user", None)
    self.whitelisted_users = config.get("whitelisted-users", None)
    # Async plumbing: session is created lazily in connect(); _closing
    # signals listen() to shut down.
    self.session = None
    self._closing = asyncio.Event()
    self.loop = asyncio.get_event_loop()
    # The token is mandatory; without it the connector stays disabled.
    try:
        self.token = config["token"]
    except (KeyError, AttributeError):
        _LOGGER.error(
            "Unable to login: Access token is missing. "
            "Telegram connector will be unavailable."
        )
def __init__(self, config, opsdroid=None):
    """Create the connector.

    Args:
        config (dict): configuration settings from the
            file config.yaml.
    """
    _LOGGER.debug("Loaded telegram connector")
    super().__init__(config, opsdroid=opsdroid)
    self.name = "telegram"
    self.opsdroid = opsdroid
    # Polling / listening state.
    self.latest_update = None
    self.default_room = None
    self.listening = True
    self.update_interval = config.get("update_interval", 1)
    # User-facing configuration.
    self.default_user = config.get("default-user", None)
    self.whitelisted_users = config.get("whitelisted-users", None)
    # The token is mandatory; without it the connector stays disabled.
    try:
        self.token = config["token"]
    except (KeyError, AttributeError):
        _LOGGER.error(
            "Unable to login: Access token is missing. "
            "Telegram connector will be unavailable."
        )
https://github.com/opsdroid/opsdroid/issues/840
|
DEBUG opsdroid.connector.telegram: {'update_id': 539000683, 'channel_post': {'message_id': 15, 'chat': {'id': -1002528219498, 'title': 'vLab', 'type': 'channel'}, 'date': 1550000742, 'text': 'hi'}}
DEBUG opsdroid.connector.telegram: {'update_id': 539026984, 'message': {'message_id': 53, 'from': {'id': 452280388, 'is_bot': False, 'first_name': 'IOBreaker', 'language_code': 'en'}, 'chat': {'id': 452266388, 'first_name': 'IOBreaker', 'type': 'private'}, 'date': 1550000813, 'text': '/start', 'entities': [{'offset': 0, 'length': 6, 'type': 'bot_command'}]}}
DEBUG asyncio: Using selector: EpollSelector
Traceback (most recent call last):
File "/usr/bin/opsdroid", line 11, in <module>
load_entry_point('opsdroid==0.14.1', 'console_scripts', 'opsdroid')()
File "/usr/lib64/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/usr/lib64/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/usr/lib64/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/lib64/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/usr/lib/python3.6/site-packages/opsdroid/__main__.py", line 197, in main
opsdroid.run()
File "/usr/lib/python3.6/site-packages/opsdroid/core.py", line 135, in run
self.eventloop.run_until_complete(asyncio.gather(*pending))
File "/usr/lib64/python3.6/asyncio/base_events.py", line 473, in run_until_complete
return future.result()
File "/usr/lib/python3.6/site-packages/opsdroid/connector/telegram/__init__.py", line 163, in listen
await self._get_messages()
File "/usr/lib/python3.6/site-packages/opsdroid/connector/telegram/__init__.py", line 143, in _get_messages
await self._parse_message(json)
File "/usr/lib/python3.6/site-packages/opsdroid/connector/telegram/__init__.py", line 99, in _parse_message
user = result["message"]["from"]["username"]
KeyError: 'username'
|
KeyError
|
async def connect(self):
    """Connect to Telegram.

    This method is not an authorization call. It basically
    checks if the API token was provided and makes an API
    call to Telegram and evaluates the status of the call.
    """
    _LOGGER.debug("Connecting to telegram")
    # Long-lived session, reused by the polling/respond methods.
    self.session = aiohttp.ClientSession()
    resp = await self.session.get(self.build_url("getMe"))
    if resp.status != 200:
        _LOGGER.error("Unable to connect")
        # Bug fix: ClientResponse.text is a coroutine *method*; the
        # previous "resp.text" logged "<bound method ...>" instead of
        # the response body.
        _LOGGER.error("Telegram error %s, %s", resp.status, await resp.text())
    else:
        json = await resp.json()
        _LOGGER.debug(json)
        _LOGGER.debug("Connected to telegram as %s", json["result"]["username"])
async def connect(self):
    """Connect to Telegram.

    This method is not an authorization call. It basically
    checks if the API token was provided and makes an API
    call to Telegram and evaluates the status of the call.
    """
    _LOGGER.debug("Connecting to telegram")
    async with aiohttp.ClientSession() as session:
        resp = await session.get(self.build_url("getMe"))
        if resp.status != 200:
            _LOGGER.error("Unable to connect")
            # Bug fix: ClientResponse.text is a coroutine *method*; the
            # previous "resp.text" logged "<bound method ...>" instead
            # of the response body.
            _LOGGER.error("Telegram error %s, %s", resp.status, await resp.text())
        else:
            json = await resp.json()
            _LOGGER.debug(json)
            _LOGGER.debug("Connected to telegram as %s", json["result"]["username"])
https://github.com/opsdroid/opsdroid/issues/840
|
DEBUG opsdroid.connector.telegram: {'update_id': 539000683, 'channel_post': {'message_id': 15, 'chat': {'id': -1002528219498, 'title': 'vLab', 'type': 'channel'}, 'date': 1550000742, 'text': 'hi'}}
DEBUG opsdroid.connector.telegram: {'update_id': 539026984, 'message': {'message_id': 53, 'from': {'id': 452280388, 'is_bot': False, 'first_name': 'IOBreaker', 'language_code': 'en'}, 'chat': {'id': 452266388, 'first_name': 'IOBreaker', 'type': 'private'}, 'date': 1550000813, 'text': '/start', 'entities': [{'offset': 0, 'length': 6, 'type': 'bot_command'}]}}
DEBUG asyncio: Using selector: EpollSelector
Traceback (most recent call last):
File "/usr/bin/opsdroid", line 11, in <module>
load_entry_point('opsdroid==0.14.1', 'console_scripts', 'opsdroid')()
File "/usr/lib64/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/usr/lib64/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/usr/lib64/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/lib64/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/usr/lib/python3.6/site-packages/opsdroid/__main__.py", line 197, in main
opsdroid.run()
File "/usr/lib/python3.6/site-packages/opsdroid/core.py", line 135, in run
self.eventloop.run_until_complete(asyncio.gather(*pending))
File "/usr/lib64/python3.6/asyncio/base_events.py", line 473, in run_until_complete
return future.result()
File "/usr/lib/python3.6/site-packages/opsdroid/connector/telegram/__init__.py", line 163, in listen
await self._get_messages()
File "/usr/lib/python3.6/site-packages/opsdroid/connector/telegram/__init__.py", line 143, in _get_messages
await self._parse_message(json)
File "/usr/lib/python3.6/site-packages/opsdroid/connector/telegram/__init__.py", line 99, in _parse_message
user = result["message"]["from"]["username"]
KeyError: 'username'
|
KeyError
|
async def _parse_message(self, response):
    """Handle logic to parse a received message.

    Since everyone can send a private message to any user/bot
    in Telegram, this method allows to set a list of whitelisted
    users that can interact with the bot. If any other user tries
    to interact with the bot the command is not parsed and instead
    the bot will inform that user that he is not allowed to talk
    with the bot.

    We also set self.latest_update to +1 in order to get the next
    available message (or an empty {} if no message has been received
    yet) with the method self._get_messages().

    Args:
        response (dict): Response returned by aiohttp.ClientSession.
    """
    for result in response["result"]:
        _LOGGER.debug(result)
        # Channel posts are not supported; skip without advancing offset.
        if "channel" in result["message"]["chat"]["type"]:
            _LOGGER.debug("Channel message parsing not supported - Ignoring message")
            continue
        if "message" not in result or "text" not in result["message"]:
            _LOGGER.error("Unable to parse the message.")
            continue
        user = self.get_user(result)
        message = Message(
            user, result["message"]["chat"], self, result["message"]["text"]
        )
        if self.handle_user_permission(result, user):
            await self.opsdroid.parse(message)
        else:
            message.text = "Sorry, you're not allowed to speak with this bot."
            await self.respond(message)
        self.latest_update = result["update_id"] + 1
async def _parse_message(self, response):
    """Handle logic to parse a received message.

    Since everyone can send a private message to any user/bot
    in Telegram, this method allows to set a list of whitelisted
    users that can interact with the bot. If any other user tries
    to interact with the bot the command is not parsed and instead
    the bot will inform that user that he is not allowed to talk
    with the bot.

    We also set self.latest_update to +1 in order to get the next
    available message (or an empty {} if no message has been received
    yet) with the method self._get_messages().

    Args:
        response (dict): Response returned by aiohttp.ClientSession.
    """
    for result in response["result"]:
        _LOGGER.debug(result)
        # Bug fix (issue #840): updates are not always chat messages --
        # channel posts arrive under "channel_post" with no "message"
        # key, which used to raise KeyError here. Skip anything we
        # can't parse, but still advance the offset so the same update
        # is not fetched forever.
        if "message" not in result or "text" not in result["message"]:
            _LOGGER.debug("Unable to parse the update - Ignoring it")
            self.latest_update = result["update_id"] + 1
            continue
        # Bug fix (issue #840): "username" is optional in Telegram
        # profiles; fall back to the first name, then the numeric id.
        sender = result["message"].get("from", {})
        user = (
            sender.get("username")
            or sender.get("first_name")
            or sender.get("id")
        )
        message = Message(
            user, result["message"]["chat"], self, result["message"]["text"]
        )
        if not self.whitelisted_users or user in self.whitelisted_users:
            await self.opsdroid.parse(message)
        else:
            message.text = "Sorry, you're not allowed to speak with this bot."
            await self.respond(message)
        self.latest_update = result["update_id"] + 1
https://github.com/opsdroid/opsdroid/issues/840
|
DEBUG opsdroid.connector.telegram: {'update_id': 539000683, 'channel_post': {'message_id': 15, 'chat': {'id': -1002528219498, 'title': 'vLab', 'type': 'channel'}, 'date': 1550000742, 'text': 'hi'}}
DEBUG opsdroid.connector.telegram: {'update_id': 539026984, 'message': {'message_id': 53, 'from': {'id': 452280388, 'is_bot': False, 'first_name': 'IOBreaker', 'language_code': 'en'}, 'chat': {'id': 452266388, 'first_name': 'IOBreaker', 'type': 'private'}, 'date': 1550000813, 'text': '/start', 'entities': [{'offset': 0, 'length': 6, 'type': 'bot_command'}]}}
DEBUG asyncio: Using selector: EpollSelector
Traceback (most recent call last):
File "/usr/bin/opsdroid", line 11, in <module>
load_entry_point('opsdroid==0.14.1', 'console_scripts', 'opsdroid')()
File "/usr/lib64/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/usr/lib64/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/usr/lib64/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/lib64/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/usr/lib/python3.6/site-packages/opsdroid/__main__.py", line 197, in main
opsdroid.run()
File "/usr/lib/python3.6/site-packages/opsdroid/core.py", line 135, in run
self.eventloop.run_until_complete(asyncio.gather(*pending))
File "/usr/lib64/python3.6/asyncio/base_events.py", line 473, in run_until_complete
return future.result()
File "/usr/lib/python3.6/site-packages/opsdroid/connector/telegram/__init__.py", line 163, in listen
await self._get_messages()
File "/usr/lib/python3.6/site-packages/opsdroid/connector/telegram/__init__.py", line 143, in _get_messages
await self._parse_message(json)
File "/usr/lib/python3.6/site-packages/opsdroid/connector/telegram/__init__.py", line 99, in _parse_message
user = result["message"]["from"]["username"]
KeyError: 'username'
|
KeyError
|
async def _get_messages(self):
    """Connect to the Telegram API.

    Uses an aiohttp ClientSession to connect to Telegram API
    and get the latest messages from the chat service.

    The data["offset"] is used to consume every new message, the API
    returns an int - "update_id" value. In order to get the next
    message this value needs to be increased by 1 the next time
    the API is called. If no new messages exists the API will just
    return an empty {}.
    """
    data = {}
    if self.latest_update is not None:
        data["offset"] = self.latest_update
    # Throttle polling so we don't hammer the API.
    await asyncio.sleep(self.update_interval)
    resp = await self.session.get(self.build_url("getUpdates"), params=data)
    if resp.status == 409:
        _LOGGER.info(
            "Can't get updates because previous "
            "webhook is still active. Will try to "
            "delete webhook."
        )
        await self.delete_webhook()
    if resp.status != 200:
        # Bug fix: ClientResponse.text is a coroutine method; it must
        # be awaited to log the actual response body instead of
        # "<bound method ...>".
        _LOGGER.error("Telegram error %s, %s", resp.status, await resp.text())
        self.listening = False
    else:
        json = await resp.json()
        await self._parse_message(json)
async def _get_messages(self):
    """Connect to the Telegram API.

    Uses an aiohttp ClientSession to connect to Telegram API
    and get the latest messages from the chat service.

    The data["offset"] is used to consume every new message, the API
    returns an int - "update_id" value. In order to get the next
    message this value needs to be increased by 1 the next time
    the API is called. If no new messages exists the API will just
    return an empty {}.
    """
    async with aiohttp.ClientSession() as session:
        data = {}
        if self.latest_update is not None:
            data["offset"] = self.latest_update
        resp = await session.get(self.build_url("getUpdates"), params=data)
        if resp.status != 200:
            # Bug fix: ClientResponse.text is a coroutine method; it
            # must be awaited to log the actual response body instead
            # of "<bound method ...>".
            _LOGGER.error("Telegram error %s, %s", resp.status, await resp.text())
            self.listening = False
        else:
            json = await resp.json()
            await self._parse_message(json)
https://github.com/opsdroid/opsdroid/issues/840
|
DEBUG opsdroid.connector.telegram: {'update_id': 539000683, 'channel_post': {'message_id': 15, 'chat': {'id': -1002528219498, 'title': 'vLab', 'type': 'channel'}, 'date': 1550000742, 'text': 'hi'}}
DEBUG opsdroid.connector.telegram: {'update_id': 539026984, 'message': {'message_id': 53, 'from': {'id': 452280388, 'is_bot': False, 'first_name': 'IOBreaker', 'language_code': 'en'}, 'chat': {'id': 452266388, 'first_name': 'IOBreaker', 'type': 'private'}, 'date': 1550000813, 'text': '/start', 'entities': [{'offset': 0, 'length': 6, 'type': 'bot_command'}]}}
DEBUG asyncio: Using selector: EpollSelector
Traceback (most recent call last):
File "/usr/bin/opsdroid", line 11, in <module>
load_entry_point('opsdroid==0.14.1', 'console_scripts', 'opsdroid')()
File "/usr/lib64/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/usr/lib64/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/usr/lib64/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/lib64/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/usr/lib/python3.6/site-packages/opsdroid/__main__.py", line 197, in main
opsdroid.run()
File "/usr/lib/python3.6/site-packages/opsdroid/core.py", line 135, in run
self.eventloop.run_until_complete(asyncio.gather(*pending))
File "/usr/lib64/python3.6/asyncio/base_events.py", line 473, in run_until_complete
return future.result()
File "/usr/lib/python3.6/site-packages/opsdroid/connector/telegram/__init__.py", line 163, in listen
await self._get_messages()
File "/usr/lib/python3.6/site-packages/opsdroid/connector/telegram/__init__.py", line 143, in _get_messages
await self._parse_message(json)
File "/usr/lib/python3.6/site-packages/opsdroid/connector/telegram/__init__.py", line 99, in _parse_message
user = result["message"]["from"]["username"]
KeyError: 'username'
|
KeyError
|
async def listen(self):
    """Listen until the connector is asked to close.

    The message polling runs as a separate cancellable task; this
    coroutine merely blocks on the closing event and then tears the
    polling task down, which makes shutdown clean and immediate.
    """
    poller = self.loop.create_task(self.get_messages_loop())
    await self._closing.wait()
    poller.cancel()
async def listen(self):
    """Listen for and parse new messages.

    The bot will always listen to all opened chat windows,
    as long as opsdroid is running. Since anyone can start
    a new chat with the bot is recommended that a list of
    users to be whitelisted be provided in config.yaml.

    The method will sleep asynchronously at the end of
    every loop. The time can either be specified in the
    config.yaml with the param update-interval - this
    defaults to 1 second.

    Args:
        opsdroid (OpsDroid): An instance of opsdroid core.
    """
    # Poll once, then wait update_interval seconds before polling again;
    # self.listening may be cleared by _get_messages on API errors.
    while self.listening:
        await self._get_messages()
        await asyncio.sleep(self.update_interval)
https://github.com/opsdroid/opsdroid/issues/840
|
DEBUG opsdroid.connector.telegram: {'update_id': 539000683, 'channel_post': {'message_id': 15, 'chat': {'id': -1002528219498, 'title': 'vLab', 'type': 'channel'}, 'date': 1550000742, 'text': 'hi'}}
DEBUG opsdroid.connector.telegram: {'update_id': 539026984, 'message': {'message_id': 53, 'from': {'id': 452280388, 'is_bot': False, 'first_name': 'IOBreaker', 'language_code': 'en'}, 'chat': {'id': 452266388, 'first_name': 'IOBreaker', 'type': 'private'}, 'date': 1550000813, 'text': '/start', 'entities': [{'offset': 0, 'length': 6, 'type': 'bot_command'}]}}
DEBUG asyncio: Using selector: EpollSelector
Traceback (most recent call last):
File "/usr/bin/opsdroid", line 11, in <module>
load_entry_point('opsdroid==0.14.1', 'console_scripts', 'opsdroid')()
File "/usr/lib64/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/usr/lib64/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/usr/lib64/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/lib64/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/usr/lib/python3.6/site-packages/opsdroid/__main__.py", line 197, in main
opsdroid.run()
File "/usr/lib/python3.6/site-packages/opsdroid/core.py", line 135, in run
self.eventloop.run_until_complete(asyncio.gather(*pending))
File "/usr/lib64/python3.6/asyncio/base_events.py", line 473, in run_until_complete
return future.result()
File "/usr/lib/python3.6/site-packages/opsdroid/connector/telegram/__init__.py", line 163, in listen
await self._get_messages()
File "/usr/lib/python3.6/site-packages/opsdroid/connector/telegram/__init__.py", line 143, in _get_messages
await self._parse_message(json)
File "/usr/lib/python3.6/site-packages/opsdroid/connector/telegram/__init__.py", line 99, in _parse_message
user = result["message"]["from"]["username"]
KeyError: 'username'
|
KeyError
|
async def respond(self, message, room=None):
    """Respond with a message.

    Args:
        message (object): An instance of Message.
        room (string, optional): Name of the room to respond to.
    """
    _LOGGER.debug("Responding with: %s", message.text)
    payload = {"chat_id": message.room["id"], "text": message.text}
    resp = await self.session.post(self.build_url("sendMessage"), data=payload)
    if resp.status != 200:
        _LOGGER.error("Unable to respond.")
    else:
        _LOGGER.debug("Successfully responded")
async def respond(self, message, room=None):
    """Respond with a message.

    Args:
        message (object): An instance of Message.
        room (string, optional): Name of the room to respond to.
    """
    _LOGGER.debug("Responding with: %s", message.text)
    async with aiohttp.ClientSession() as session:
        payload = {"chat_id": message.room["id"], "text": message.text}
        resp = await session.post(self.build_url("sendMessage"), data=payload)
        if resp.status != 200:
            _LOGGER.error("Unable to respond.")
        else:
            _LOGGER.debug("Successfully responded")
https://github.com/opsdroid/opsdroid/issues/840
|
DEBUG opsdroid.connector.telegram: {'update_id': 539000683, 'channel_post': {'message_id': 15, 'chat': {'id': -1002528219498, 'title': 'vLab', 'type': 'channel'}, 'date': 1550000742, 'text': 'hi'}}
DEBUG opsdroid.connector.telegram: {'update_id': 539026984, 'message': {'message_id': 53, 'from': {'id': 452280388, 'is_bot': False, 'first_name': 'IOBreaker', 'language_code': 'en'}, 'chat': {'id': 452266388, 'first_name': 'IOBreaker', 'type': 'private'}, 'date': 1550000813, 'text': '/start', 'entities': [{'offset': 0, 'length': 6, 'type': 'bot_command'}]}}
DEBUG asyncio: Using selector: EpollSelector
Traceback (most recent call last):
File "/usr/bin/opsdroid", line 11, in <module>
load_entry_point('opsdroid==0.14.1', 'console_scripts', 'opsdroid')()
File "/usr/lib64/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/usr/lib64/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/usr/lib64/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/lib64/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/usr/lib/python3.6/site-packages/opsdroid/__main__.py", line 197, in main
opsdroid.run()
File "/usr/lib/python3.6/site-packages/opsdroid/core.py", line 135, in run
self.eventloop.run_until_complete(asyncio.gather(*pending))
File "/usr/lib64/python3.6/asyncio/base_events.py", line 473, in run_until_complete
return future.result()
File "/usr/lib/python3.6/site-packages/opsdroid/connector/telegram/__init__.py", line 163, in listen
await self._get_messages()
File "/usr/lib/python3.6/site-packages/opsdroid/connector/telegram/__init__.py", line 143, in _get_messages
await self._parse_message(json)
File "/usr/lib/python3.6/site-packages/opsdroid/connector/telegram/__init__.py", line 99, in _parse_message
user = result["message"]["from"]["username"]
KeyError: 'username'
|
KeyError
|
async def connect(self):
    """Connect to the chat service.

    Starts an RTM session via the slacker client, opens the websocket
    it hands back, and (re)spawns the keepalive task. Known connection
    failures are downgraded to log messages so opsdroid keeps running.
    """
    _LOGGER.info("Connecting to Slack")
    try:
        connection = await self.slacker.rtm.start()
        self.websocket = await websockets.connect(connection.body["url"])
        _LOGGER.debug("Connected as %s", self.bot_name)
        _LOGGER.debug("Using icon %s", self.icon_emoji)
        _LOGGER.debug("Default room is %s", self.default_room)
        _LOGGER.info("Connected successfully")
        # Only spawn a new keepalive task if none is currently running.
        if self.keepalive is None or self.keepalive.done():
            self.keepalive = self.opsdroid.eventloop.create_task(
                self.keepalive_websocket()
            )
    except aiohttp.ClientOSError as error:
        # Transient network problem: retry after a delay.
        _LOGGER.error(error)
        _LOGGER.error("Failed to connect to Slack, retrying in 10")
        await self.reconnect(10)
    except slacker.Error as error:
        # API-level rejection (e.g. invalid_auth): disable this
        # connector without bringing down opsdroid (issue #764).
        _LOGGER.error(
            "Unable to connect to Slack due to %s - "
            "The Slack Connector will not be available.",
            error,
        )
    except Exception:
        # Unknown failure: clean up the connection and let callers see it.
        await self.disconnect()
        raise
async def connect(self):
    """Connect to the chat service.

    Starts an RTM session via the slacker client, opens the websocket
    it hands back, and (re)spawns the keepalive task. Known connection
    failures are downgraded to log messages so opsdroid keeps running.
    """
    _LOGGER.info("Connecting to Slack")
    try:
        connection = await self.slacker.rtm.start()
        self.websocket = await websockets.connect(connection.body["url"])
        _LOGGER.debug("Connected as %s", self.bot_name)
        _LOGGER.debug("Using icon %s", self.icon_emoji)
        _LOGGER.debug("Default room is %s", self.default_room)
        _LOGGER.info("Connected successfully")
        if self.keepalive is None or self.keepalive.done():
            self.keepalive = self.opsdroid.eventloop.create_task(
                self.keepalive_websocket()
            )
    except aiohttp.ClientOSError as error:
        _LOGGER.error(error)
        _LOGGER.error("Failed to connect to Slack, retrying in 10")
        await self.reconnect(10)
    except slacker.Error as error:
        # Bug fix (issue #764): API-level rejections such as
        # "invalid_auth" previously fell into the broad handler below
        # and were re-raised, crashing opsdroid at startup. Log and
        # leave the connector disabled instead.
        _LOGGER.error(
            "Unable to connect to Slack due to %s - "
            "The Slack Connector will not be available.",
            error,
        )
    except Exception:
        # Unknown failure: clean up the connection and let callers see it.
        await self.disconnect()
        raise
https://github.com/opsdroid/opsdroid/issues/764
|
Traceback (most recent call last):
File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/Users/fabiorosado/Documents/GitHub/opsdroid/opsdroid/__main__.py", line 206, in <module>
init()
File "/Users/fabiorosado/Documents/GitHub/opsdroid/opsdroid/__main__.py", line 203, in init
main()
File "/Users/fabiorosado/.local/share/virtualenvs/opsdroid-13bLHlYD/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/Users/fabiorosado/.local/share/virtualenvs/opsdroid-13bLHlYD/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/Users/fabiorosado/.local/share/virtualenvs/opsdroid-13bLHlYD/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/fabiorosado/.local/share/virtualenvs/opsdroid-13bLHlYD/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/Users/fabiorosado/Documents/GitHub/opsdroid/opsdroid/__main__.py", line 196, in main
opsdroid.load()
File "/Users/fabiorosado/Documents/GitHub/opsdroid/opsdroid/core.py", line 153, in load
self.start_connectors(self.modules["connectors"])
File "/Users/fabiorosado/Documents/GitHub/opsdroid/opsdroid/core.py", line 248, in start_connectors
self.eventloop.run_until_complete(connector.connect(self))
File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/asyncio/base_events.py", line 466, in run_until_complete
return future.result()
File "/Users/fabiorosado/Documents/GitHub/opsdroid/opsdroid/connector/slack/__init__.py", line 50, in connect
connection = await self.slacker.rtm.start()
File "/Users/fabiorosado/.local/share/virtualenvs/opsdroid-13bLHlYD/lib/python3.6/site-packages/aioslacker/__init__.py", line 97, in __request
raise Error(response.error)
slacker.Error: invalid_auth
Exception ignored in: <bound method BaseEventLoop.__del__ of <_UnixSelectorEventLoop running=False closed=True debug=False>>
Traceback (most recent call last):
File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/asyncio/base_events.py", line 511, in __del__
File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/asyncio/unix_events.py", line 65, in close
File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/asyncio/unix_events.py", line 146, in remove_signal_handler
File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/signal.py", line 47, in signal
TypeError: signal handler must be signal.SIG_IGN, signal.SIG_DFL, or a callable object
|
slacker.Error
|
async def listen(self):
    """Listen for and parse new messages."""
    while self.listening:
        try:
            await self.receive_from_websocket()
        except AttributeError:
            # self.websocket is None when connect() failed (e.g. bad
            # credentials); stop listening instead of looping on the
            # same error forever (issue #764).
            break
async def listen(self):
"""Listen for and parse new messages."""
while self.listening:
await self.receive_from_websocket()
|
https://github.com/opsdroid/opsdroid/issues/764
|
Traceback (most recent call last):
File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/Users/fabiorosado/Documents/GitHub/opsdroid/opsdroid/__main__.py", line 206, in <module>
init()
File "/Users/fabiorosado/Documents/GitHub/opsdroid/opsdroid/__main__.py", line 203, in init
main()
File "/Users/fabiorosado/.local/share/virtualenvs/opsdroid-13bLHlYD/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/Users/fabiorosado/.local/share/virtualenvs/opsdroid-13bLHlYD/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/Users/fabiorosado/.local/share/virtualenvs/opsdroid-13bLHlYD/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/fabiorosado/.local/share/virtualenvs/opsdroid-13bLHlYD/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/Users/fabiorosado/Documents/GitHub/opsdroid/opsdroid/__main__.py", line 196, in main
opsdroid.load()
File "/Users/fabiorosado/Documents/GitHub/opsdroid/opsdroid/core.py", line 153, in load
self.start_connectors(self.modules["connectors"])
File "/Users/fabiorosado/Documents/GitHub/opsdroid/opsdroid/core.py", line 248, in start_connectors
self.eventloop.run_until_complete(connector.connect(self))
File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/asyncio/base_events.py", line 466, in run_until_complete
return future.result()
File "/Users/fabiorosado/Documents/GitHub/opsdroid/opsdroid/connector/slack/__init__.py", line 50, in connect
connection = await self.slacker.rtm.start()
File "/Users/fabiorosado/.local/share/virtualenvs/opsdroid-13bLHlYD/lib/python3.6/site-packages/aioslacker/__init__.py", line 97, in __request
raise Error(response.error)
slacker.Error: invalid_auth
Exception ignored in: <bound method BaseEventLoop.__del__ of <_UnixSelectorEventLoop running=False closed=True debug=False>>
Traceback (most recent call last):
File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/asyncio/base_events.py", line 511, in __del__
File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/asyncio/unix_events.py", line 65, in close
File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/asyncio/unix_events.py", line 146, in remove_signal_handler
File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/signal.py", line 47, in signal
TypeError: signal handler must be signal.SIG_IGN, signal.SIG_DFL, or a callable object
|
slacker.Error
|
def __init__(self):
"""Start opsdroid."""
self.bot_name = "opsdroid"
self.sys_status = 0
self.connectors = []
self.connector_tasks = []
self.eventloop = asyncio.get_event_loop()
for sig in (signal.SIGINT, signal.SIGTERM):
self.eventloop.add_signal_handler(sig, self.stop)
self.skills = []
self.memory = Memory()
self.loader = Loader(self)
self.config = {}
self.stats = {
"messages_parsed": 0,
"webhooks_called": 0,
"total_response_time": 0,
"total_responses": 0,
}
self.web_server = None
self.should_restart = False
self.stored_path = []
_LOGGER.info("Created main opsdroid object")
|
def __init__(self):
"""Start opsdroid."""
self.bot_name = "opsdroid"
self.sys_status = 0
self.connectors = []
self.connector_tasks = []
self.eventloop = asyncio.get_event_loop()
self.skills = []
self.memory = Memory()
self.loader = Loader(self)
self.config = {}
self.stats = {
"messages_parsed": 0,
"webhooks_called": 0,
"total_response_time": 0,
"total_responses": 0,
}
self.web_server = None
self.should_restart = False
self.stored_path = []
_LOGGER.info("Created main opsdroid object")
|
https://github.com/opsdroid/opsdroid/issues/247
|
^CERROR asyncio: Task exception was never retrieved
future: <Task finished coro=<ConnectorShell.listen() done, defined at /root/.opsdroid/modules/opsdroid-modules/connector/shell/__init__.py:57> exception=ConnectionResetError('Connection lost',)>
Traceback (most recent call last):
File "/usr/local/lib/python3.5/asyncio/tasks.py", line 240, in _step
result = coro.send(None)
File "/root/.opsdroid/modules/opsdroid-modules/connector/shell/__init__.py", line 63, in listen
user_input = await async_input('', opsdroid.eventloop)
File "/root/.opsdroid/modules/opsdroid-modules/connector/shell/__init__.py", line 37, in async_input
await writer.drain()
File "/usr/local/lib/python3.5/asyncio/streams.py", line 333, in drain
yield from self._protocol._drain_helper()
File "/usr/local/lib/python3.5/asyncio/streams.py", line 204, in _drain_helper
raise ConnectionResetError('Connection lost')
ConnectionResetError: Connection lost
ERROR asyncio: Exception in default exception handler
Traceback (most recent call last):
File "/usr/local/lib/python3.5/asyncio/base_events.py", line 1284, in call_exception_handler
self.default_exception_handler(context)
File "/usr/local/lib/python3.5/asyncio/base_events.py", line 1259, in default_exception_handler
logger.error('\n'.join(log_lines), exc_info=exc_info)
File "/usr/local/lib/python3.5/logging/__init__.py", line 1314, in error
self._log(ERROR, msg, args, **kwargs)
File "/usr/local/lib/python3.5/logging/__init__.py", line 1421, in _log
self.handle(record)
File "/usr/local/lib/python3.5/logging/__init__.py", line 1431, in handle
self.callHandlers(record)
File "/usr/local/lib/python3.5/logging/__init__.py", line 1493, in callHandlers
hdlr.handle(record)
File "/usr/local/lib/python3.5/logging/__init__.py", line 861, in handle
self.emit(record)
File "/usr/local/lib/python3.5/logging/__init__.py", line 1053, in emit
self.stream = self._open()
File "/usr/local/lib/python3.5/logging/__init__.py", line 1043, in _open
return open(self.baseFilename, self.mode, encoding=self.encoding)
NameError: name 'open' is not defined
Exception ignored in: <bound method Task.__del__ of <Task finished coro=<ConnectorShell.listen() done, defined at /root/.opsdroid/modules/opsdroid-modules/connector/shell/__init__.py:57> exception=ConnectionResetError('Connection lost',)>>
Traceback (most recent call last):
File "/usr/local/lib/python3.5/asyncio/tasks.py", line 93, in __del__
File "/usr/local/lib/python3.5/asyncio/futures.py", line 234, in __del__
File "/usr/local/lib/python3.5/asyncio/base_events.py", line 1290, in call_exception_handler
File "/usr/local/lib/python3.5/logging/__init__.py", line 1314, in error
File "/usr/local/lib/python3.5/logging/__init__.py", line 1421, in _log
File "/usr/local/lib/python3.5/logging/__init__.py", line 1431, in handle
File "/usr/local/lib/python3.5/logging/__init__.py", line 1493, in callHandlers
File "/usr/local/lib/python3.5/logging/__init__.py", line 861, in handle
File "/usr/local/lib/python3.5/logging/__init__.py", line 1053, in emit
File "/usr/local/lib/python3.5/logging/__init__.py", line 1043, in _open
NameError: name 'open' is not defined
ERROR asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<parse_crontab() running at /usr/src/app/opsdroid/parsers/crontab.py:20> wait_for=<Future pending cb=[Task._wakeup()]>>
ERROR asyncio: Exception in default exception handler
Traceback (most recent call last):
File "/usr/local/lib/python3.5/asyncio/base_events.py", line 1284, in call_exception_handler
self.default_exception_handler(context)
File "/usr/local/lib/python3.5/asyncio/base_events.py", line 1259, in default_exception_handler
logger.error('\n'.join(log_lines), exc_info=exc_info)
File "/usr/local/lib/python3.5/logging/__init__.py", line 1314, in error
self._log(ERROR, msg, args, **kwargs)
File "/usr/local/lib/python3.5/logging/__init__.py", line 1421, in _log
self.handle(record)
File "/usr/local/lib/python3.5/logging/__init__.py", line 1431, in handle
self.callHandlers(record)
File "/usr/local/lib/python3.5/logging/__init__.py", line 1493, in callHandlers
hdlr.handle(record)
File "/usr/local/lib/python3.5/logging/__init__.py", line 861, in handle
self.emit(record)
File "/usr/local/lib/python3.5/logging/__init__.py", line 1053, in emit
self.stream = self._open()
File "/usr/local/lib/python3.5/logging/__init__.py", line 1043, in _open
return open(self.baseFilename, self.mode, encoding=self.encoding)
NameError: name 'open' is not defined
Exception ignored in: <bound method Task.__del__ of <Task pending coro=<parse_crontab() running at /usr/src/app/opsdroid/parsers/crontab.py:20> wait_for=<Future pending cb=[Task._wakeup()]>>>
Traceback (most recent call last):
File "/usr/local/lib/python3.5/asyncio/tasks.py", line 92, in __del__
File "/usr/local/lib/python3.5/asyncio/base_events.py", line 1290, in call_exception_handler
File "/usr/local/lib/python3.5/logging/__init__.py", line 1314, in error
File "/usr/local/lib/python3.5/logging/__init__.py", line 1421, in _log
File "/usr/local/lib/python3.5/logging/__init__.py", line 1431, in handle
File "/usr/local/lib/python3.5/logging/__init__.py", line 1493, in callHandlers
File "/usr/local/lib/python3.5/logging/__init__.py", line 861, in handle
File "/usr/local/lib/python3.5/logging/__init__.py", line 1053, in emit
File "/usr/local/lib/python3.5/logging/__init__.py", line 1043, in _open
NameError: name 'open' is not defined
|
ConnectionResetError
|
def stop(self):
"""Stop the event loop."""
pending = asyncio.Task.all_tasks()
for task in pending:
task.cancel()
self.eventloop.stop()
print("") # Prints a character return for return to shell
_LOGGER.info("Keyboard interrupt, exiting.")
|
def stop(self):
"""Stop the event loop."""
pending = asyncio.Task.all_tasks()
for task in pending:
task.cancel()
self.eventloop.stop()
|
https://github.com/opsdroid/opsdroid/issues/247
|
^CERROR asyncio: Task exception was never retrieved
future: <Task finished coro=<ConnectorShell.listen() done, defined at /root/.opsdroid/modules/opsdroid-modules/connector/shell/__init__.py:57> exception=ConnectionResetError('Connection lost',)>
Traceback (most recent call last):
File "/usr/local/lib/python3.5/asyncio/tasks.py", line 240, in _step
result = coro.send(None)
File "/root/.opsdroid/modules/opsdroid-modules/connector/shell/__init__.py", line 63, in listen
user_input = await async_input('', opsdroid.eventloop)
File "/root/.opsdroid/modules/opsdroid-modules/connector/shell/__init__.py", line 37, in async_input
await writer.drain()
File "/usr/local/lib/python3.5/asyncio/streams.py", line 333, in drain
yield from self._protocol._drain_helper()
File "/usr/local/lib/python3.5/asyncio/streams.py", line 204, in _drain_helper
raise ConnectionResetError('Connection lost')
ConnectionResetError: Connection lost
ERROR asyncio: Exception in default exception handler
Traceback (most recent call last):
File "/usr/local/lib/python3.5/asyncio/base_events.py", line 1284, in call_exception_handler
self.default_exception_handler(context)
File "/usr/local/lib/python3.5/asyncio/base_events.py", line 1259, in default_exception_handler
logger.error('\n'.join(log_lines), exc_info=exc_info)
File "/usr/local/lib/python3.5/logging/__init__.py", line 1314, in error
self._log(ERROR, msg, args, **kwargs)
File "/usr/local/lib/python3.5/logging/__init__.py", line 1421, in _log
self.handle(record)
File "/usr/local/lib/python3.5/logging/__init__.py", line 1431, in handle
self.callHandlers(record)
File "/usr/local/lib/python3.5/logging/__init__.py", line 1493, in callHandlers
hdlr.handle(record)
File "/usr/local/lib/python3.5/logging/__init__.py", line 861, in handle
self.emit(record)
File "/usr/local/lib/python3.5/logging/__init__.py", line 1053, in emit
self.stream = self._open()
File "/usr/local/lib/python3.5/logging/__init__.py", line 1043, in _open
return open(self.baseFilename, self.mode, encoding=self.encoding)
NameError: name 'open' is not defined
Exception ignored in: <bound method Task.__del__ of <Task finished coro=<ConnectorShell.listen() done, defined at /root/.opsdroid/modules/opsdroid-modules/connector/shell/__init__.py:57> exception=ConnectionResetError('Connection lost',)>>
Traceback (most recent call last):
File "/usr/local/lib/python3.5/asyncio/tasks.py", line 93, in __del__
File "/usr/local/lib/python3.5/asyncio/futures.py", line 234, in __del__
File "/usr/local/lib/python3.5/asyncio/base_events.py", line 1290, in call_exception_handler
File "/usr/local/lib/python3.5/logging/__init__.py", line 1314, in error
File "/usr/local/lib/python3.5/logging/__init__.py", line 1421, in _log
File "/usr/local/lib/python3.5/logging/__init__.py", line 1431, in handle
File "/usr/local/lib/python3.5/logging/__init__.py", line 1493, in callHandlers
File "/usr/local/lib/python3.5/logging/__init__.py", line 861, in handle
File "/usr/local/lib/python3.5/logging/__init__.py", line 1053, in emit
File "/usr/local/lib/python3.5/logging/__init__.py", line 1043, in _open
NameError: name 'open' is not defined
ERROR asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<parse_crontab() running at /usr/src/app/opsdroid/parsers/crontab.py:20> wait_for=<Future pending cb=[Task._wakeup()]>>
ERROR asyncio: Exception in default exception handler
Traceback (most recent call last):
File "/usr/local/lib/python3.5/asyncio/base_events.py", line 1284, in call_exception_handler
self.default_exception_handler(context)
File "/usr/local/lib/python3.5/asyncio/base_events.py", line 1259, in default_exception_handler
logger.error('\n'.join(log_lines), exc_info=exc_info)
File "/usr/local/lib/python3.5/logging/__init__.py", line 1314, in error
self._log(ERROR, msg, args, **kwargs)
File "/usr/local/lib/python3.5/logging/__init__.py", line 1421, in _log
self.handle(record)
File "/usr/local/lib/python3.5/logging/__init__.py", line 1431, in handle
self.callHandlers(record)
File "/usr/local/lib/python3.5/logging/__init__.py", line 1493, in callHandlers
hdlr.handle(record)
File "/usr/local/lib/python3.5/logging/__init__.py", line 861, in handle
self.emit(record)
File "/usr/local/lib/python3.5/logging/__init__.py", line 1053, in emit
self.stream = self._open()
File "/usr/local/lib/python3.5/logging/__init__.py", line 1043, in _open
return open(self.baseFilename, self.mode, encoding=self.encoding)
NameError: name 'open' is not defined
Exception ignored in: <bound method Task.__del__ of <Task pending coro=<parse_crontab() running at /usr/src/app/opsdroid/parsers/crontab.py:20> wait_for=<Future pending cb=[Task._wakeup()]>>>
Traceback (most recent call last):
File "/usr/local/lib/python3.5/asyncio/tasks.py", line 92, in __del__
File "/usr/local/lib/python3.5/asyncio/base_events.py", line 1290, in call_exception_handler
File "/usr/local/lib/python3.5/logging/__init__.py", line 1314, in error
File "/usr/local/lib/python3.5/logging/__init__.py", line 1421, in _log
File "/usr/local/lib/python3.5/logging/__init__.py", line 1431, in handle
File "/usr/local/lib/python3.5/logging/__init__.py", line 1493, in callHandlers
File "/usr/local/lib/python3.5/logging/__init__.py", line 861, in handle
File "/usr/local/lib/python3.5/logging/__init__.py", line 1053, in emit
File "/usr/local/lib/python3.5/logging/__init__.py", line 1043, in _open
NameError: name 'open' is not defined
|
ConnectionResetError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.