after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def to_representation(self, obj):
    """Render ``obj`` using the serializer that matches its concrete model.

    When invoked on the polymorphic base serializer, the concrete model
    class of ``obj`` selects the specialized serializer; subclasses (or
    unrecognized models) fall through to the default representation.
    """
    if type(self) is not UnifiedJobTemplateSerializer:
        return super(UnifiedJobTemplateSerializer, self).to_representation(obj)
    # Ordered dispatch table: the first matching model class wins.
    type_map = (
        (Project, ProjectSerializer),
        (InventorySource, InventorySourceSerializer),
        (JobTemplate, JobTemplateSerializer),
        (SystemJobTemplate, SystemJobTemplateSerializer),
        (WorkflowJobTemplate, WorkflowJobTemplateSerializer),
    )
    for model_cls, serializer_cls in type_map:
        if isinstance(obj, model_cls):
            sub_serializer = serializer_cls(instance=obj, context=self.context)
            # preserve links for list view
            if self.parent:
                sub_serializer.parent = self.parent
                sub_serializer.polymorphic_base = self
                # capabilities prefetch is only valid for these models
                if isinstance(obj, (JobTemplate, WorkflowJobTemplate)):
                    sub_serializer.capabilities_prefetch = self._capabilities_prefetch
                else:
                    sub_serializer.capabilities_prefetch = None
            return sub_serializer.to_representation(obj)
    # No specialized serializer matched; use the default representation.
    return super(UnifiedJobTemplateSerializer, self).to_representation(obj)
|
def to_representation(self, obj):
    """Render ``obj`` using the serializer for its concrete model type.

    When invoked on the polymorphic base serializer, dispatch to the
    model-specific serializer; otherwise fall through to the default
    representation.
    """
    serializer_class = None
    if type(self) is UnifiedJobTemplateSerializer:
        if isinstance(obj, Project):
            serializer_class = ProjectSerializer
        elif isinstance(obj, InventorySource):
            serializer_class = InventorySourceSerializer
        elif isinstance(obj, JobTemplate):
            serializer_class = JobTemplateSerializer
        elif isinstance(obj, SystemJobTemplate):
            serializer_class = SystemJobTemplateSerializer
        elif isinstance(obj, WorkflowJobTemplate):
            serializer_class = WorkflowJobTemplateSerializer
    if serializer_class:
        serializer = serializer_class(instance=obj, context=self.context)
        # preserve links for list view
        if self.parent:
            serializer.parent = self.parent
            serializer.polymorphic_base = self
            # Bug fix: explicitly propagate the parent's page-level
            # capabilities prefetch to the child serializer for the models
            # that support it, instead of only clearing it for the others.
            # Without this, the child serializer falls back to per-object
            # capability evaluation, which blows up for list views (see the
            # `accessible_pk_qs` AttributeError traceback).
            if isinstance(obj, (JobTemplate, WorkflowJobTemplate)):
                serializer.capabilities_prefetch = self._capabilities_prefetch
            else:
                serializer.capabilities_prefetch = None
        return serializer.to_representation(obj)
    else:
        return super(UnifiedJobTemplateSerializer, self).to_representation(obj)
|
https://github.com/ansible/awx/issues/1546
|
AttributeError: 'super' object has no attribute 'accessible_pk_qs'
2018-03-13 19:49:42,026 ERROR django.request Internal Server Error: /api/v2/inventory_sources/
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/exception.py", line 41, in inner
response = get_response(request)
File "/usr/lib/python2.7/site-packages/awx/wsgi.py", line 65, in _legacy_get_response
return super(AWXWSGIHandler, self)._legacy_get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 249, in _legacy_get_response
response = self._get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 187, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 185, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/utils/decorators.py", line 185, in inner
return func(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/views/decorators/csrf.py", line 58, in wrapped_view
return view_func(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/views/generic/base.py", line 68, in view
return self.dispatch(request, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/api/generics.py", line 284, in dispatch
return super(APIView, self).dispatch(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 494, in dispatch
response = self.handle_exception(exc)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 454, in handle_exception
self.raise_uncaught_exception(exc)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 491, in dispatch
response = handler(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/generics.py", line 201, in get
return self.list(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/mixins.py", line 45, in list
return self.get_paginated_response(serializer.data)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/serializers.py", line 742, in data
ret = super(ListSerializer, self).data
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/serializers.py", line 262, in data
self._data = self.to_representation(self.instance)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/serializers.py", line 660, in to_representation
self.child.to_representation(item) for item in iterable
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 2070, in to_representation
ret = super(InventorySourceSerializer, self).to_representation(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 683, in to_representation
return super(UnifiedJobTemplateSerializer, self).to_representation(obj)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/serializers.py", line 504, in to_representation
ret[field.field_name] = field.to_representation(attribute)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/fields.py", line 1816, in to_representation
return method(value)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 346, in _get_summary_fields
return {} if obj is None else self.get_summary_fields(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 2038, in get_summary_fields
summary_fields = super(InventorySourceSerializer, self).get_summary_fields(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 1973, in get_summary_fields
summary_fields = super(InventorySourceOptionsSerializer, self).get_summary_fields(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 407, in get_summary_fields
user_capabilities = self._obj_capability_dict(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 436, in _obj_capability_dict
model, qs, prefetch_list, view.request.user
File "/usr/lib/python2.7/site-packages/awx/main/utils/common.py", line 571, in prefetch_page_capabilities
filter_args.append(Q(**{'pk__in': model.accessible_pk_qs(user, '%s_role' % role_type)}))
File "/usr/lib/python2.7/site-packages/awx/main/models/unified_jobs.py", line 199, in accessible_pk_qs
return super(UnifiedJobTemplate, cls).accessible_pk_qs(accessor, role_field)
AttributeError: 'super' object has no attribute 'accessible_pk_qs'
|
AttributeError
|
def to_internal_value(self, data):
    """Convert an incoming credential payload into internal values.

    For API v1 payloads that omit ``credential_type``, infer the
    CredentialType from the legacy ``kind`` field and reject any leftover
    payload keys that are not valid for the detected type.
    """
    # TODO: remove when API v1 is removed
    if "credential_type" not in data and self.version == 1:
        # If `credential_type` is not provided, assume the payload is a
        # v1 credential payload that specifies a `kind` and a flat list
        # of field values
        #
        # In this scenario, we should automatically detect the proper
        # CredentialType based on the provided values
        kind = data.get("kind", "ssh")
        credential_type = CredentialType.from_v1_kind(kind, data)
        if credential_type is None:
            # Fix: interpolate *after* the gettext lookup.  Interpolating
            # inside _() looks up the already-substituted string, which can
            # never match an entry in the message catalog.
            raise serializers.ValidationError(
                {"kind": _('"%s" is not a valid choice') % kind}
            )
        data["credential_type"] = credential_type.pk
        # NOTE(review): `.items() + .items()` relies on Python 2 returning
        # lists from dict.items(); this entire branch disappears with API v1.
        value = OrderedDict(
            {"credential_type": credential_type}.items()
            + super(CredentialSerializer, self).to_internal_value(data).items()
        )
        # Make a set of the keys in the POST/PUT payload
        # - Subtract real fields (name, organization, inputs)
        # - Subtract virtual v1 fields defined on the determined credential
        #   type (username, password, etc...)
        # - Any leftovers are invalid for the determined credential type
        valid_fields = set(super(CredentialSerializer, self).get_fields().keys())
        valid_fields.update(V2CredentialFields().get_fields().keys())
        valid_fields.update(["kind", "cloud"])
        for field in (
            set(data.keys()) - valid_fields - set(credential_type.defined_fields)
        ):
            if data.get(field):
                raise serializers.ValidationError(
                    {
                        "detail": _(
                            "'{field_name}' is not a valid field for {credential_type_name}"
                        ).format(
                            field_name=field, credential_type_name=credential_type.name
                        )
                    }
                )
        value.pop("kind", None)
        return value
    return super(CredentialSerializer, self).to_internal_value(data)
|
def to_internal_value(self, data):
    """Convert an incoming credential payload into internal values.

    For API v1 payloads that omit ``credential_type``, infer the
    CredentialType from the legacy ``kind`` field and reject any leftover
    payload keys that are not valid for the detected type.
    """
    # TODO: remove when API v1 is removed
    if "credential_type" not in data and self.version == 1:
        # If `credential_type` is not provided, assume the payload is a
        # v1 credential payload that specifies a `kind` and a flat list
        # of field values
        #
        # In this scenario, we should automatically detect the proper
        # CredentialType based on the provided values
        kind = data.get("kind", "ssh")
        credential_type = CredentialType.from_v1_kind(kind, data)
        if credential_type is None:
            # Fix: interpolate *after* the gettext lookup so the literal
            # format string matches the message catalog entry.
            raise serializers.ValidationError(
                {"kind": _('"%s" is not a valid choice') % kind}
            )
        data["credential_type"] = credential_type.pk
        # NOTE(review): `.items() + .items()` relies on Python 2 returning
        # lists from dict.items(); this entire branch disappears with API v1.
        value = OrderedDict(
            {"credential_type": credential_type}.items()
            + super(CredentialSerializer, self).to_internal_value(data).items()
        )
        # Make a set of the keys in the POST/PUT payload
        # - Subtract real fields (name, organization, inputs)
        # - Subtract virtual v1 fields defined on the determined credential
        #   type (username, password, etc...)
        # - Any leftovers are invalid for the determined credential type
        valid_fields = set(super(CredentialSerializer, self).get_fields().keys())
        valid_fields.update(V2CredentialFields().get_fields().keys())
        valid_fields.update(["kind", "cloud"])
        for field in (
            set(data.keys()) - valid_fields - set(credential_type.defined_fields)
        ):
            if data.get(field):
                # Fix: use named placeholders so translators can reorder the
                # substitutions; positional %s pairs cannot be reordered.
                raise serializers.ValidationError(
                    {
                        "detail": _(
                            "'{field_name}' is not a valid field for {credential_type_name}"
                        ).format(
                            field_name=field, credential_type_name=credential_type.name
                        )
                    }
                )
        value.pop("kind", None)
        return value
    return super(CredentialSerializer, self).to_internal_value(data)
|
https://github.com/ansible/awx/issues/1546
|
AttributeError: 'super' object has no attribute 'accessible_pk_qs'
2018-03-13 19:49:42,026 ERROR django.request Internal Server Error: /api/v2/inventory_sources/
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/exception.py", line 41, in inner
response = get_response(request)
File "/usr/lib/python2.7/site-packages/awx/wsgi.py", line 65, in _legacy_get_response
return super(AWXWSGIHandler, self)._legacy_get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 249, in _legacy_get_response
response = self._get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 187, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 185, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/utils/decorators.py", line 185, in inner
return func(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/views/decorators/csrf.py", line 58, in wrapped_view
return view_func(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/views/generic/base.py", line 68, in view
return self.dispatch(request, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/api/generics.py", line 284, in dispatch
return super(APIView, self).dispatch(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 494, in dispatch
response = self.handle_exception(exc)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 454, in handle_exception
self.raise_uncaught_exception(exc)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 491, in dispatch
response = handler(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/generics.py", line 201, in get
return self.list(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/mixins.py", line 45, in list
return self.get_paginated_response(serializer.data)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/serializers.py", line 742, in data
ret = super(ListSerializer, self).data
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/serializers.py", line 262, in data
self._data = self.to_representation(self.instance)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/serializers.py", line 660, in to_representation
self.child.to_representation(item) for item in iterable
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 2070, in to_representation
ret = super(InventorySourceSerializer, self).to_representation(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 683, in to_representation
return super(UnifiedJobTemplateSerializer, self).to_representation(obj)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/serializers.py", line 504, in to_representation
ret[field.field_name] = field.to_representation(attribute)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/fields.py", line 1816, in to_representation
return method(value)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 346, in _get_summary_fields
return {} if obj is None else self.get_summary_fields(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 2038, in get_summary_fields
summary_fields = super(InventorySourceSerializer, self).get_summary_fields(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 1973, in get_summary_fields
summary_fields = super(InventorySourceOptionsSerializer, self).get_summary_fields(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 407, in get_summary_fields
user_capabilities = self._obj_capability_dict(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 436, in _obj_capability_dict
model, qs, prefetch_list, view.request.user
File "/usr/lib/python2.7/site-packages/awx/main/utils/common.py", line 571, in prefetch_page_capabilities
filter_args.append(Q(**{'pk__in': model.accessible_pk_qs(user, '%s_role' % role_type)}))
File "/usr/lib/python2.7/site-packages/awx/main/models/unified_jobs.py", line 199, in accessible_pk_qs
return super(UnifiedJobTemplate, cls).accessible_pk_qs(accessor, role_field)
AttributeError: 'super' object has no attribute 'accessible_pk_qs'
|
AttributeError
|
def __enum_validate__(validator, enums, instance, schema):
    """jsonschema ``enum`` validator yielding a translatable error message."""
    if instance in enums:
        return
    allowed = "', '".join(enums)
    message = _("'{value}' is not one of ['{allowed_values}']").format(
        value=instance, allowed_values=allowed
    )
    yield jsonschema.exceptions.ValidationError(message)
|
def __enum_validate__(validator, enums, instance, schema):
    """jsonschema ``enum`` validator yielding a translatable error message."""
    if instance not in enums:
        # Fix: use named placeholders instead of positional %s so translators
        # can reorder the substitutions for their locale.
        yield jsonschema.exceptions.ValidationError(
            _("'{value}' is not one of ['{allowed_values}']").format(
                value=instance, allowed_values="', '".join(enums)
            )
        )
|
https://github.com/ansible/awx/issues/1546
|
AttributeError: 'super' object has no attribute 'accessible_pk_qs'
2018-03-13 19:49:42,026 ERROR django.request Internal Server Error: /api/v2/inventory_sources/
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/exception.py", line 41, in inner
response = get_response(request)
File "/usr/lib/python2.7/site-packages/awx/wsgi.py", line 65, in _legacy_get_response
return super(AWXWSGIHandler, self)._legacy_get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 249, in _legacy_get_response
response = self._get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 187, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 185, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/utils/decorators.py", line 185, in inner
return func(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/views/decorators/csrf.py", line 58, in wrapped_view
return view_func(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/views/generic/base.py", line 68, in view
return self.dispatch(request, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/api/generics.py", line 284, in dispatch
return super(APIView, self).dispatch(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 494, in dispatch
response = self.handle_exception(exc)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 454, in handle_exception
self.raise_uncaught_exception(exc)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 491, in dispatch
response = handler(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/generics.py", line 201, in get
return self.list(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/mixins.py", line 45, in list
return self.get_paginated_response(serializer.data)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/serializers.py", line 742, in data
ret = super(ListSerializer, self).data
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/serializers.py", line 262, in data
self._data = self.to_representation(self.instance)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/serializers.py", line 660, in to_representation
self.child.to_representation(item) for item in iterable
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 2070, in to_representation
ret = super(InventorySourceSerializer, self).to_representation(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 683, in to_representation
return super(UnifiedJobTemplateSerializer, self).to_representation(obj)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/serializers.py", line 504, in to_representation
ret[field.field_name] = field.to_representation(attribute)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/fields.py", line 1816, in to_representation
return method(value)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 346, in _get_summary_fields
return {} if obj is None else self.get_summary_fields(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 2038, in get_summary_fields
summary_fields = super(InventorySourceSerializer, self).get_summary_fields(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 1973, in get_summary_fields
summary_fields = super(InventorySourceOptionsSerializer, self).get_summary_fields(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 407, in get_summary_fields
user_capabilities = self._obj_capability_dict(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 436, in _obj_capability_dict
model, qs, prefetch_list, view.request.user
File "/usr/lib/python2.7/site-packages/awx/main/utils/common.py", line 571, in prefetch_page_capabilities
filter_args.append(Q(**{'pk__in': model.accessible_pk_qs(user, '%s_role' % role_type)}))
File "/usr/lib/python2.7/site-packages/awx/main/models/unified_jobs.py", line 199, in accessible_pk_qs
return super(UnifiedJobTemplate, cls).accessible_pk_qs(accessor, role_field)
AttributeError: 'super' object has no attribute 'accessible_pk_qs'
|
AttributeError
|
def validate(self, value, model_instance):
    """Validate a CredentialType ``inputs`` schema definition.

    Enforces that custom (non-managed) types do not use ``dependencies``,
    that field IDs are unique and do not collide with reserved names, and
    that string-only attributes are not attached to non-string fields.
    """
    if (
        isinstance(value, dict)
        and "dependencies" in value
        and not model_instance.managed_by_tower
    ):
        raise django_exceptions.ValidationError(
            _("'dependencies' is not supported for custom credentials."),
            code="invalid",
            params={"value": value},
        )
    super(CredentialTypeInputField, self).validate(value, model_instance)
    ids = {}
    for field in value.get("fields", []):
        id_ = field.get("id")
        if id_ == "tower":
            raise django_exceptions.ValidationError(
                _('"tower" is a reserved field name'),
                code="invalid",
                params={"value": value},
            )
        if id_ in ids:
            # Fix: interpolate *after* the gettext lookup.  The original
            # `_("... (%s)" % id_)` looked up the already-substituted string,
            # which can never match an entry in the message catalog.
            raise django_exceptions.ValidationError(
                _("field IDs must be unique (%s)") % id_,
                code="invalid",
                params={"value": value},
            )
        ids[id_] = True
        if "type" not in field:
            # If no type is specified, default to string
            field["type"] = "string"
        if field["type"] == "become_method":
            if not model_instance.managed_by_tower:
                raise django_exceptions.ValidationError(
                    _("become_method is a reserved type name"),
                    code="invalid",
                    params={"value": value},
                )
            else:
                field.pop("type")
                field["choices"] = CHOICES_PRIVILEGE_ESCALATION_METHODS
        # NOTE(review): after the become_method branch pops "type" above,
        # field["type"] here would raise KeyError when e.g. "choices" is
        # present — confirm the intended ordering against the full file.
        for key in (
            "choices",
            "multiline",
            "format",
            "secret",
        ):
            if key in field and field["type"] != "string":
                # Fix: call .format() on the *translated* string; formatting
                # before _() defeats the message-catalog lookup.
                raise django_exceptions.ValidationError(
                    _(
                        "{sub_key} not allowed for {element_type} type ({element_id})"
                    ).format(
                        sub_key=key,
                        element_type=field["type"],
                        element_id=field["id"],
                    ),
                    code="invalid",
                    params={"value": value},
                )
|
def validate(self, value, model_instance):
    """Validate a CredentialType ``inputs`` schema definition.

    Enforces that custom (non-managed) types do not use ``dependencies``,
    that field IDs are unique and do not collide with reserved names, and
    that string-only attributes are not attached to non-string fields.
    """
    if (
        isinstance(value, dict)
        and "dependencies" in value
        and not model_instance.managed_by_tower
    ):
        raise django_exceptions.ValidationError(
            _("'dependencies' is not supported for custom credentials."),
            code="invalid",
            params={"value": value},
        )
    super(CredentialTypeInputField, self).validate(value, model_instance)
    ids = {}
    for field in value.get("fields", []):
        id_ = field.get("id")
        if id_ == "tower":
            raise django_exceptions.ValidationError(
                _('"tower" is a reserved field name'),
                code="invalid",
                params={"value": value},
            )
        if id_ in ids:
            # Fix: interpolate *after* the gettext lookup.  The original
            # `_("... (%s)" % id_)` looked up the already-substituted string,
            # which can never match an entry in the message catalog.
            raise django_exceptions.ValidationError(
                _("field IDs must be unique (%s)") % id_,
                code="invalid",
                params={"value": value},
            )
        ids[id_] = True
        if "type" not in field:
            # If no type is specified, default to string
            field["type"] = "string"
        if field["type"] == "become_method":
            if not model_instance.managed_by_tower:
                raise django_exceptions.ValidationError(
                    _("become_method is a reserved type name"),
                    code="invalid",
                    params={"value": value},
                )
            else:
                field.pop("type")
                field["choices"] = CHOICES_PRIVILEGE_ESCALATION_METHODS
        # NOTE(review): after the become_method branch pops "type" above,
        # field["type"] here would raise KeyError when e.g. "choices" is
        # present — confirm the intended ordering against the full file.
        for key in (
            "choices",
            "multiline",
            "format",
            "secret",
        ):
            if key in field and field["type"] != "string":
                # Fix: interpolate after translation, and use named
                # placeholders so translators can reorder substitutions.
                raise django_exceptions.ValidationError(
                    _(
                        "{sub_key} not allowed for {element_type} type ({element_id})"
                    ).format(
                        sub_key=key,
                        element_type=field["type"],
                        element_id=field["id"],
                    ),
                    code="invalid",
                    params={"value": value},
                )
|
https://github.com/ansible/awx/issues/1546
|
AttributeError: 'super' object has no attribute 'accessible_pk_qs'
2018-03-13 19:49:42,026 ERROR django.request Internal Server Error: /api/v2/inventory_sources/
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/exception.py", line 41, in inner
response = get_response(request)
File "/usr/lib/python2.7/site-packages/awx/wsgi.py", line 65, in _legacy_get_response
return super(AWXWSGIHandler, self)._legacy_get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 249, in _legacy_get_response
response = self._get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 187, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 185, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/utils/decorators.py", line 185, in inner
return func(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/views/decorators/csrf.py", line 58, in wrapped_view
return view_func(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/views/generic/base.py", line 68, in view
return self.dispatch(request, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/api/generics.py", line 284, in dispatch
return super(APIView, self).dispatch(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 494, in dispatch
response = self.handle_exception(exc)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 454, in handle_exception
self.raise_uncaught_exception(exc)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 491, in dispatch
response = handler(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/generics.py", line 201, in get
return self.list(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/mixins.py", line 45, in list
return self.get_paginated_response(serializer.data)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/serializers.py", line 742, in data
ret = super(ListSerializer, self).data
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/serializers.py", line 262, in data
self._data = self.to_representation(self.instance)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/serializers.py", line 660, in to_representation
self.child.to_representation(item) for item in iterable
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 2070, in to_representation
ret = super(InventorySourceSerializer, self).to_representation(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 683, in to_representation
return super(UnifiedJobTemplateSerializer, self).to_representation(obj)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/serializers.py", line 504, in to_representation
ret[field.field_name] = field.to_representation(attribute)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/fields.py", line 1816, in to_representation
return method(value)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 346, in _get_summary_fields
return {} if obj is None else self.get_summary_fields(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 2038, in get_summary_fields
summary_fields = super(InventorySourceSerializer, self).get_summary_fields(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 1973, in get_summary_fields
summary_fields = super(InventorySourceOptionsSerializer, self).get_summary_fields(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 407, in get_summary_fields
user_capabilities = self._obj_capability_dict(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 436, in _obj_capability_dict
model, qs, prefetch_list, view.request.user
File "/usr/lib/python2.7/site-packages/awx/main/utils/common.py", line 571, in prefetch_page_capabilities
filter_args.append(Q(**{'pk__in': model.accessible_pk_qs(user, '%s_role' % role_type)}))
File "/usr/lib/python2.7/site-packages/awx/main/models/unified_jobs.py", line 199, in accessible_pk_qs
return super(UnifiedJobTemplate, cls).accessible_pk_qs(accessor, role_field)
AttributeError: 'super' object has no attribute 'accessible_pk_qs'
|
AttributeError
|
def validate(self, value, model_instance):
    """Validate a CredentialType ``injectors`` definition.

    Renders every injector template against an example namespace built from
    the type's defined input fields, surfacing undefined-variable and
    Jinja syntax errors as ValidationErrors.
    """
    super(CredentialTypeInjectorField, self).validate(value, model_instance)
    # make sure the inputs are valid first
    try:
        CredentialTypeInputField().validate(model_instance.inputs, model_instance)
    except django_exceptions.ValidationError:
        # If `model_instance.inputs` itself is invalid, we can't make an
        # estimation as to whether our Jinja templates contain valid field
        # names; don't continue
        return
    # In addition to basic schema validation, search the injector fields
    # for template variables and make sure they match the fields defined in
    # the inputs
    valid_namespace = dict(
        (field, "EXAMPLE") for field in model_instance.defined_fields
    )

    # Stand-in object for the `tower` template namespace (tower.filename...).
    class TowerNamespace:
        filename = None

    valid_namespace["tower"] = TowerNamespace()
    # ensure either single file or multi-file syntax is used (but not both)
    template_names = [
        x for x in value.get("file", {}).keys() if x.startswith("template")
    ]
    if "template" in template_names and len(template_names) > 1:
        raise django_exceptions.ValidationError(
            _("Must use multi-file syntax when injecting multiple files"),
            code="invalid",
            params={"value": value},
        )
    if "template" not in template_names:
        # Multi-file syntax: tower.filename becomes a namespace with one
        # attribute per `template.<name>` entry.
        valid_namespace["tower"].filename = TowerNamespace()
        # NOTE(review): nesting this loop under the multi-file branch is
        # inferred (the dump lost indentation) — confirm against the
        # original file; running it for the single-file case would fail on
        # split(".")[1].
        for template_name in template_names:
            template_name = template_name.split(".")[1]
            setattr(valid_namespace["tower"].filename, template_name, "EXAMPLE")
    for type_, injector in value.items():
        for key, tmpl in injector.items():
            try:
                # StrictUndefined turns any reference to a name missing from
                # valid_namespace into an UndefinedError.
                Environment(undefined=StrictUndefined).from_string(tmpl).render(
                    valid_namespace
                )
            except UndefinedError as e:
                raise django_exceptions.ValidationError(
                    _("{sub_key} uses an undefined field ({error_msg})").format(
                        sub_key=key, error_msg=e
                    ),
                    code="invalid",
                    params={"value": value},
                )
            except TemplateSyntaxError as e:
                raise django_exceptions.ValidationError(
                    _(
                        "Syntax error rendering template for {sub_key} inside of {type} ({error_msg})"
                    ).format(sub_key=key, type=type_, error_msg=e),
                    code="invalid",
                    params={"value": value},
                )
|
def validate(self, value, model_instance):
    """Validate a CredentialType ``injectors`` definition.

    Renders every injector template against an example namespace built from
    the type's defined input fields, surfacing undefined-variable and
    Jinja syntax errors as ValidationErrors.
    """
    super(CredentialTypeInjectorField, self).validate(value, model_instance)
    # make sure the inputs are valid first
    try:
        CredentialTypeInputField().validate(model_instance.inputs, model_instance)
    except django_exceptions.ValidationError:
        # If `model_instance.inputs` itself is invalid, we can't make an
        # estimation as to whether our Jinja templates contain valid field
        # names; don't continue
        return
    # In addition to basic schema validation, search the injector fields
    # for template variables and make sure they match the fields defined in
    # the inputs
    valid_namespace = dict(
        (field, "EXAMPLE") for field in model_instance.defined_fields
    )

    # Stand-in object for the `tower` template namespace (tower.filename...).
    class TowerNamespace:
        filename = None

    valid_namespace["tower"] = TowerNamespace()
    # ensure either single file or multi-file syntax is used (but not both)
    template_names = [
        x for x in value.get("file", {}).keys() if x.startswith("template")
    ]
    if "template" in template_names and len(template_names) > 1:
        raise django_exceptions.ValidationError(
            _("Must use multi-file syntax when injecting multiple files"),
            code="invalid",
            params={"value": value},
        )
    if "template" not in template_names:
        # Multi-file syntax: tower.filename becomes a namespace with one
        # attribute per `template.<name>` entry.
        valid_namespace["tower"].filename = TowerNamespace()
        # NOTE(review): nesting this loop under the multi-file branch is
        # inferred (the dump lost indentation) — confirm against the
        # original file; running it for the single-file case would fail on
        # split(".")[1].
        for template_name in template_names:
            template_name = template_name.split(".")[1]
            setattr(valid_namespace["tower"].filename, template_name, "EXAMPLE")
    for type_, injector in value.items():
        for key, tmpl in injector.items():
            try:
                # StrictUndefined turns any reference to a name missing from
                # valid_namespace into an UndefinedError.
                Environment(undefined=StrictUndefined).from_string(tmpl).render(
                    valid_namespace
                )
            except UndefinedError as e:
                # Fix: named placeholders instead of positional %s so that
                # translators can reorder substitutions for their locale.
                raise django_exceptions.ValidationError(
                    _("{sub_key} uses an undefined field ({error_msg})").format(
                        sub_key=key, error_msg=e
                    ),
                    code="invalid",
                    params={"value": value},
                )
            except TemplateSyntaxError as e:
                raise django_exceptions.ValidationError(
                    _(
                        "Syntax error rendering template for {sub_key} inside of {type} ({error_msg})"
                    ).format(sub_key=key, type=type_, error_msg=e),
                    code="invalid",
                    params={"value": value},
                )
|
https://github.com/ansible/awx/issues/1546
|
AttributeError: 'super' object has no attribute 'accessible_pk_qs'
2018-03-13 19:49:42,026 ERROR django.request Internal Server Error: /api/v2/inventory_sources/
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/exception.py", line 41, in inner
response = get_response(request)
File "/usr/lib/python2.7/site-packages/awx/wsgi.py", line 65, in _legacy_get_response
return super(AWXWSGIHandler, self)._legacy_get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 249, in _legacy_get_response
response = self._get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 187, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 185, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/utils/decorators.py", line 185, in inner
return func(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/views/decorators/csrf.py", line 58, in wrapped_view
return view_func(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/views/generic/base.py", line 68, in view
return self.dispatch(request, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/api/generics.py", line 284, in dispatch
return super(APIView, self).dispatch(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 494, in dispatch
response = self.handle_exception(exc)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 454, in handle_exception
self.raise_uncaught_exception(exc)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 491, in dispatch
response = handler(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/generics.py", line 201, in get
return self.list(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/mixins.py", line 45, in list
return self.get_paginated_response(serializer.data)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/serializers.py", line 742, in data
ret = super(ListSerializer, self).data
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/serializers.py", line 262, in data
self._data = self.to_representation(self.instance)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/serializers.py", line 660, in to_representation
self.child.to_representation(item) for item in iterable
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 2070, in to_representation
ret = super(InventorySourceSerializer, self).to_representation(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 683, in to_representation
return super(UnifiedJobTemplateSerializer, self).to_representation(obj)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/serializers.py", line 504, in to_representation
ret[field.field_name] = field.to_representation(attribute)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/fields.py", line 1816, in to_representation
return method(value)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 346, in _get_summary_fields
return {} if obj is None else self.get_summary_fields(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 2038, in get_summary_fields
summary_fields = super(InventorySourceSerializer, self).get_summary_fields(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 1973, in get_summary_fields
summary_fields = super(InventorySourceOptionsSerializer, self).get_summary_fields(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 407, in get_summary_fields
user_capabilities = self._obj_capability_dict(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 436, in _obj_capability_dict
model, qs, prefetch_list, view.request.user
File "/usr/lib/python2.7/site-packages/awx/main/utils/common.py", line 571, in prefetch_page_capabilities
filter_args.append(Q(**{'pk__in': model.accessible_pk_qs(user, '%s_role' % role_type)}))
File "/usr/lib/python2.7/site-packages/awx/main/models/unified_jobs.py", line 199, in accessible_pk_qs
return super(UnifiedJobTemplate, cls).accessible_pk_qs(accessor, role_field)
AttributeError: 'super' object has no attribute 'accessible_pk_qs'
|
AttributeError
|
def update_raw_data(self, data):
    """Populate example POST data for a job relaunch, including empty
    placeholders for any credential passwords required at start time."""
    data = super(JobRelaunch, self).update_raw_data(data)
    try:
        job = self.get_object()
    except PermissionDenied:
        # Caller may not view this job; return the base payload untouched.
        return data
    if not job:
        return data
    passwords = job.passwords_needed_to_start
    if passwords:
        data["credential_passwords"] = {p: "" for p in passwords}
    else:
        # No prompts required; drop any stale placeholder if present.
        data.pop("credential_passwords", None)
    return data
|
def update_raw_data(self, data):
    """Populate example POST data for a job relaunch.

    Adds an empty ``credential_passwords`` mapping for each password the
    job needs at start time; otherwise removes the key entirely.
    """
    data = super(JobRelaunch, self).update_raw_data(data)
    try:
        obj = self.get_object()
    except PermissionDenied:
        return data
    if obj:
        needed_passwords = obj.passwords_needed_to_start
        if needed_passwords:
            data["credential_passwords"] = {}
            for p in needed_passwords:
                data["credential_passwords"][p] = ""
        else:
            # BUG FIX: supply a default so pop() does not raise KeyError
            # when "credential_passwords" was never present in the data.
            data.pop("credential_passwords", None)
    return data
|
https://github.com/ansible/awx/issues/1393
|
web_1 | error: [Errno 111] Connection refused
web_1 | 2018-02-28 15:04:22,988 ERROR django.request Internal Server Error: /api/v2/instances/1/
web_1 | Traceback (most recent call last):
web_1 | File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/exception.py", line 41, in inner
web_1 | response = get_response(request)
web_1 | File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 249, in _legacy_get_response
web_1 | response = self._get_response(request)
web_1 | File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 187, in _get_response
web_1 | response = self.process_exception_by_middleware(e, request)
web_1 | File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 185, in _get_response
web_1 | response = wrapped_callback(request, *callback_args, **callback_kwargs)
web_1 | File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/utils/decorators.py", line 185, in inner
web_1 | return func(*args, **kwargs)
web_1 | File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/views/decorators/csrf.py", line 58, in wrapped_view
web_1 | return view_func(*args, **kwargs)
web_1 | File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/views/generic/base.py", line 68, in view
web_1 | return self.dispatch(request, *args, **kwargs)
web_1 | File "/usr/lib/python2.7/site-packages/awx/api/generics.py", line 284, in dispatch
web_1 | return super(APIView, self).dispatch(request, *args, **kwargs)
web_1 | File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 489, in dispatch
web_1 | response = self.handle_exception(exc)
web_1 | File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 449, in handle_exception
web_1 | self.raise_uncaught_exception(exc)
web_1 | File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 486, in dispatch
web_1 | response = handler(request, *args, **kwargs)
web_1 | File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/generics.py", line 257, in put
web_1 | return self.update(request, *args, **kwargs)
web_1 | File "/usr/lib/python2.7/site-packages/awx/api/views.py", line 594, in update
web_1 | handle_ha_toplogy_changes.apply_async()
web_1 | File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/celery/app/task.py", line 573, in apply_async
web_1 | **dict(self._get_exec_options(), **options)
web_1 | File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/celery/app/base.py", line 354, in send_task
web_1 | reply_to=reply_to or self.oid, **options
web_1 | File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/celery/app/amqp.py", line 310, in publish_task
web_1 | **kwargs
web_1 | File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/messaging.py", line 172, in publish
web_1 | routing_key, mandatory, immediate, exchange, declare)
web_1 | File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/connection.py", line 470, in _ensured
web_1 | interval_max)
web_1 | File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/connection.py", line 382, in ensure_connection
web_1 | interval_start, interval_step, interval_max, callback)
web_1 | File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/utils/__init__.py", line 246, in retry_over_time
web_1 | return fun(*args, **kwargs)
web_1 | File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/connection.py", line 250, in connect
web_1 | return self.connection
web_1 | File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/connection.py", line 756, in connection
web_1 | self._connection = self._establish_connection()
web_1 | File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/connection.py", line 711, in _establish_connection
web_1 | conn = self.transport.establish_connection()
web_1 | File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/transport/pyamqp.py", line 116, in establish_connection
web_1 | conn = self.Connection(**opts)
web_1 | File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/amqp/connection.py", line 165, in __init__
web_1 | self.transport = self.Transport(host, connect_timeout, ssl)
web_1 | File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/amqp/connection.py", line 186, in Transport
web_1 | return create_transport(host, connect_timeout, ssl)
web_1 | File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/amqp/transport.py", line 299, in create_transport
web_1 | return TCPTransport(host, connect_timeout)
web_1 | File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/amqp/transport.py", line 95, in __init__
web_1 | raise socket.error(last_err)
|
socket.error
|
def _get_enabled(self, from_dict, default=None):
"""
Retrieve the enabled state from the given dict of host variables.
The enabled variable may be specified as 'foo.bar', in which case
the lookup will traverse into nested dicts, equivalent to:
from_dict.get('foo', {}).get('bar', default)
"""
enabled = default
if getattr(self, "enabled_var", None):
default = object()
for key in self.enabled_var.split("."):
if not hasattr(from_dict, "get"):
enabled = default
break
enabled = from_dict.get(key, default)
from_dict = enabled
if enabled is not default:
enabled_value = getattr(self, "enabled_value", None)
if enabled_value is not None:
enabled = bool(unicode(enabled_value) == unicode(enabled))
else:
enabled = bool(enabled)
if enabled is default:
return None
elif isinstance(enabled, bool):
return enabled
else:
raise NotImplementedError("Value of enabled {} not understood.".format(enabled))
|
def _get_enabled(self, from_dict, default=None):
"""
Retrieve the enabled state from the given dict of host variables.
The enabled variable may be specified as 'foo.bar', in which case
the lookup will traverse into nested dicts, equivalent to:
from_dict.get('foo', {}).get('bar', default)
"""
enabled = default
if getattr(self, "enabled_var", None):
default = object()
for key in self.enabled_var.split("."):
if not hasattr(from_dict, "get"):
enabled = default
break
enabled = from_dict.get(key, default)
from_dict = enabled
if enabled is not default:
enabled_value = getattr(self, "enabled_value", None)
if enabled_value is not None:
enabled = bool(unicode(enabled_value) == unicode(enabled))
else:
enabled = bool(enabled)
return enabled
|
https://github.com/ansible/awx/issues/705
|
2017-11-23 01:44:04,433 INFO awx.main.commands.inventory_import Updating inventory 2: CF
2017-11-23 01:44:04,472 INFO awx.main.commands.inventory_import Reading Ansible inventory source: /usr/lib/python2.7/site-packages/awx/plugins/inventory/cloudforms.py
2017-11-23 01:44:34,698 INFO awx.main.commands.inventory_import Processing JSON output...
2017-11-23 01:44:34,720 INFO awx.main.commands.inventory_import Loaded 322 groups, 266 hosts
...
2017-11-23 01:44:34,739 WARNING awx.main.commands.inventory_import Host \"<redacted1>\" has no \"id\" variable
2017-11-23 01:44:34,739 WARNING awx.main.commands.inventory_import Host \"<redacted2>\" has no \"id\" variable
2017-11-23 01:44:34,739 WARNING awx.main.commands.inventory_import Host \"<redacted3>\" has no \"id\" variable
...
2017-11-23 01:44:38,254 INFO awx.main.commands.inventory_import Group \"<redacted4>_ovf\" added
2017-11-23 01:44:38,261 INFO awx.main.commands.inventory_import Group \"<redacted5>_ovf\" added
2017-11-23 01:44:38,269 INFO awx.main.commands.inventory_import Group \"location\" added
2017-11-23 01:44:38,276 INFO awx.main.commands.inventory_import Group \"redhat\" added
2017-11-23 01:44:38,283 INFO awx.main.commands.inventory_import Group \"tags\" added
2017-11-23 01:44:38,290 INFO awx.main.commands.inventory_import Group \"type\" added
2017-11-23 01:44:38,298 INFO awx.main.commands.inventory_import Group \"vendor\" added
Traceback (most recent call last):
File \"/usr/bin/awx-manage\", line 9, in <module>
load_entry_point('awx==1.0.1.225', 'console_scripts', 'awx-manage')()
File \"/usr/lib/python2.7/site-packages/awx/__init__.py\", line 109, in manage
execute_from_command_line(sys.argv)
File \"/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/management/__init__.py\", line 364, in execute_from_command_line
utility.execute()
File \"/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/management/__init__.py\", line 356, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File \"/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/management/base.py\", line 283, in run_from_argv
self.execute(*args, **cmd_options)
File \"/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/management/base.py\", line 330, in execute
output = self.handle(*args, **options)
File \"/usr/lib/python2.7/site-packages/awx/main/management/commands/inventory_import.py\", line 1020, in handle
self.load_into_database()
File \"/usr/lib/python2.7/site-packages/awx/main/management/commands/inventory_import.py\", line 892, in load_into_database
self._create_update_hosts()
File \"/usr/lib/python2.7/site-packages/awx/main/management/commands/inventory_import.py\", line 798, in _create_update_hosts
db_host = self.inventory.hosts.update_or_create(name=mem_host_name, defaults=host_attrs)[0]
File \"/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/models/fields/related_descriptors.py\", line 665, in update_or_create
return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs)
File \"/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/models/manager.py\", line 85, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File \"/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/models/query.py\", line 482, in update_or_create
obj, created = self._create_object_from_params(lookup, params)
File \"/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/models/query.py\", line 498, in _create_object_from_params
obj = self.create(**params)
File \"/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/models/query.py\", line 394, in create
obj.save(force_insert=True, using=self.db)
File \"/usr/lib/python2.7/site-packages/awx/main/models/inventory.py\", line 665, in save
super(Host, self).save(*args, **kwargs)
File \"/usr/lib/python2.7/site-packages/awx/main/models/base.py\", line 264, in save
super(PrimordialModel, self).save(*args, **kwargs)
File \"/usr/lib/python2.7/site-packages/awx/main/models/base.py\", line 159, in save
super(CreatedModifiedModel, self).save(*args, **kwargs)
File \"/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/models/base.py\", line 808, in save
force_update=force_update, update_fields=update_fields)
File \"/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/models/base.py\", line 838, in save_base
updated = self._save_table(raw, cls, force_insert, force_update, using, update_fields)
File \"/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/models/base.py\", line 924, in _save_table
result = self._do_insert(cls._base_manager, using, fields, update_pk, raw)
File \"/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/models/base.py\", line 963, in _do_insert
using=using, raw=raw)
File \"/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/models/manager.py\", line 85, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File \"/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/models/query.py\", line 1076, in _insert
return query.get_compiler(using=using).execute_sql(return_id)
File \"/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/models/sql/compiler.py\", line 1106, in execute_sql
for sql, params in self.as_sql():
File \"/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/models/sql/compiler.py\", line 1059, in as_sql
for obj in self.query.objs
File \"/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/models/sql/compiler.py\", line 998, in prepare_value
value = field.get_db_prep_save(value, connection=self.connection)
File \"/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/models/fields/__init__.py\", line 770, in get_db_prep_save
prepared=False)
File \"/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/models/fields/__init__.py\", line 762, in get_db_prep_value
value = self.get_prep_value(value)
File \"/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/models/fields/__init__.py\", line 1043, in get_prep_value
return self.to_python(value)
File \"/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/models/fields/__init__.py\", line 1036, in to_python
params={'value': value},
django.core.exceptions.ValidationError: [u\"'<object object at 0x6aa0f40>' value must be either True or False.\"]
|
django.core.exceptions.ValidationError
|
def _default_steadystate_args():
    """Return the default option dict for the SciPy-backed steadystate
    solvers; each key matches a keyword argument of ``steadystate``."""
    return dict(
        sparse=True,
        use_rcm=False,
        use_wbm=False,
        weight=None,
        use_precond=False,
        all_states=False,
        M=None,
        x0=None,
        drop_tol=1e-4,
        fill_factor=100,
        diag_pivot_thresh=None,
        maxiter=1000,
        tol=1e-12,
        matol=1e-15,
        mtol=None,
        permc_spec="COLAMD",
        ILU_MILU="smilu_2",
        restart=20,
        return_info=False,
        info=_empty_info_dict(),
        verbose=False,
        solver="scipy",
    )
|
def _default_steadystate_args():
    """Return the default option dict for the SciPy-backed steadystate
    solvers; each key matches a keyword argument of ``steadystate``."""
    def_args = {
        "sparse": True,
        "use_rcm": False,
        "use_wbm": False,
        "weight": None,
        "use_precond": False,
        "all_states": False,
        "M": None,
        "x0": None,
        "drop_tol": 1e-4,
        "fill_factor": 100,
        "diag_pivot_thresh": None,
        "maxiter": 1000,
        "tol": 1e-12,
        # BUG FIX: `matol`/`mtol` are documented kwargs of steadystate()
        # (lu-solve tolerances for the iterative power methods); without
        # these defaults the kwarg validation loop rejects them.
        "matol": 1e-15,
        "mtol": None,
        "permc_spec": "COLAMD",
        "ILU_MILU": "smilu_2",
        "restart": 20,
        "return_info": False,
        "info": _empty_info_dict(),
        "verbose": False,
        "solver": "scipy",
    }
    return def_args
|
https://github.com/qutip/qutip/issues/862
|
..................................................
======================================================================
ERROR: Steady state: Thermal qubit - power-gmres solver
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/Users/shahnawaz/dev/qutip/qutip/tests/test_steadystate.py", line 145, in test_qubit_power_gmres
rho_ss = steadystate(H, c_op_list, method='power-gmres')
File "/Users/shahnawaz/dev/qutip/qutip/steadystate.py", line 298, in steadystate
return _steadystate_power(A, ss_args)
File "/Users/shahnawaz/dev/qutip/qutip/steadystate.py", line 863, in _steadystate_power
v = v / la.norm(v, np.inf)
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/scipy/linalg/misc.py", line 137, in norm
a = np.asarray_chkfinite(a)
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/numpy/lib/function_base.py", line 1233, in asarray_chkfinite
"array must not contain infs or NaNs")
ValueError: array must not contain infs or NaNs
----------------------------------------------------------------------
Ran 561 tests in 576.175s
FAILED (SKIP=19, errors=1)
|
ValueError
|
def _mkl_steadystate_args():
    """Return the default option dict for the MKL Pardiso steadystate
    solver; each key matches a keyword argument of ``steadystate``."""
    return dict(
        max_iter_refine=10,
        scaling_vectors=True,
        weighted_matching=True,
        return_info=False,
        info=_empty_info_dict(),
        verbose=False,
        solver="mkl",
        use_rcm=False,
        use_wbm=False,
        weight=None,
        tol=1e-12,
        matol=1e-15,
        mtol=None,
        maxiter=1000,
    )
|
def _mkl_steadystate_args():
    """Return the default option dict for the MKL Pardiso steadystate
    solver; each key matches a keyword argument of ``steadystate``."""
    def_args = {
        "max_iter_refine": 10,
        "scaling_vectors": True,
        "weighted_matching": True,
        "return_info": False,
        "info": _empty_info_dict(),
        "verbose": False,
        "solver": "mkl",
        "use_rcm": False,
        "use_wbm": False,
        "weight": None,
        "tol": 1e-12,
        # BUG FIX: `matol`/`mtol` are documented kwargs of steadystate()
        # (lu-solve tolerances for the iterative power methods); without
        # these defaults the kwarg validation loop rejects them.
        "matol": 1e-15,
        "mtol": None,
        "maxiter": 1000,
    }
    return def_args
|
https://github.com/qutip/qutip/issues/862
|
..................................................
======================================================================
ERROR: Steady state: Thermal qubit - power-gmres solver
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/Users/shahnawaz/dev/qutip/qutip/tests/test_steadystate.py", line 145, in test_qubit_power_gmres
rho_ss = steadystate(H, c_op_list, method='power-gmres')
File "/Users/shahnawaz/dev/qutip/qutip/steadystate.py", line 298, in steadystate
return _steadystate_power(A, ss_args)
File "/Users/shahnawaz/dev/qutip/qutip/steadystate.py", line 863, in _steadystate_power
v = v / la.norm(v, np.inf)
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/scipy/linalg/misc.py", line 137, in norm
a = np.asarray_chkfinite(a)
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/numpy/lib/function_base.py", line 1233, in asarray_chkfinite
"array must not contain infs or NaNs")
ValueError: array must not contain infs or NaNs
----------------------------------------------------------------------
Ran 561 tests in 576.175s
FAILED (SKIP=19, errors=1)
|
ValueError
|
def steadystate(A, c_op_list=None, method="direct", solver=None, **kwargs):
    """Calculates the steady state for quantum evolution subject to the
    supplied Hamiltonian or Liouvillian operator and (if given a Hamiltonian) a
    list of collapse operators.
    If the user passes a Hamiltonian then it, along with the list of collapse
    operators, will be converted into a Liouvillian operator in Lindblad form.
    Parameters
    ----------
    A : qobj
        A Hamiltonian or Liouvillian operator.
    c_op_list : list, optional
        A list of collapse operators.  Defaults to an empty list.
    solver : str {None, 'scipy', 'mkl'}
        Selects the sparse solver to use. Default is auto-select
        based on the availability of the MKL library.
    method : str {'direct', 'eigen', 'iterative-gmres',
        'iterative-lgmres', 'iterative-bicgstab', 'svd', 'power',
        'power-gmres', 'power-lgmres', 'power-bicgstab'}
        Method for solving the underlying linear equation. Direct LU solver
        'direct' (default), sparse eigenvalue problem 'eigen',
        iterative GMRES method 'iterative-gmres', iterative LGMRES method
        'iterative-lgmres', iterative BICGSTAB method 'iterative-bicgstab',
        SVD 'svd' (dense), or inverse-power method 'power'. The iterative
        power methods 'power-gmres', 'power-lgmres', 'power-bicgstab' use
        the same solvers as their direct counterparts.
    return_info : bool, optional, default = False
        Return a dictionary of solver-specific infomation about the
        solution and how it was obtained.
    sparse : bool, optional, default = True
        Solve for the steady state using sparse algorithms. If set to False,
        the underlying Liouvillian operator will be converted into a dense
        matrix. Use only for 'smaller' systems.
    use_rcm : bool, optional, default = False
        Use reverse Cuthill-Mckee reordering to minimize fill-in in the
        LU factorization of the Liouvillian.
    use_wbm : bool, optional, default = False
        Use Weighted Bipartite Matching reordering to make the Liouvillian
        diagonally dominant. This is useful for iterative preconditioners
        only, and is set to ``True`` by default when finding a preconditioner.
    weight : float, optional
        Sets the size of the elements used for adding the unity trace condition
        to the linear solvers. This is set to the average abs value of the
        Liouvillian elements if not specified by the user.
    max_iter_refine : int {10}
        MKL ONLY. Max. number of iterative refinements to perform.
    scaling_vectors : bool {True, False}
        MKL ONLY. Scale matrix to unit norm columns and rows.
    weighted_matching : bool {True, False}
        MKL ONLY. Use weighted matching to better condition diagonal.
    x0 : ndarray, optional
        ITERATIVE ONLY. Initial guess for solution vector.
    maxiter : int, optional, default=1000
        ITERATIVE ONLY. Maximum number of iterations to perform.
    tol : float, optional, default=1e-12
        ITERATIVE ONLY. Tolerance used for terminating solver.
    mtol : float, optional, default=None
        ITERATIVE 'power' methods ONLY. Tolerance for lu solve method.
        If None given then `max(0.1*tol, 1e-15)` is used
    matol : float, optional, default=1e-15
        ITERATIVE ONLY. Absolute tolerance for lu solve method.
    permc_spec : str, optional, default='COLAMD'
        ITERATIVE ONLY. Column ordering used internally by superLU for the
        'direct' LU decomposition method. Options include 'COLAMD' and
        'NATURAL'. If using RCM then this is set to 'NATURAL' automatically
        unless explicitly specified.
    use_precond : bool optional, default = False
        ITERATIVE ONLY. Use an incomplete sparse LU decomposition as a
        preconditioner for the 'iterative' GMRES and BICG solvers.
        Speeds up convergence time by orders of magnitude in many cases.
    M : {sparse matrix, dense matrix, LinearOperator}, optional
        ITERATIVE ONLY. Preconditioner for A. The preconditioner should
        approximate the inverse of A. Effective preconditioning can
        dramatically improve the rate of convergence for iterative methods.
        If no preconditioner is given and ``use_precond = True``, then one
        is generated automatically.
    fill_factor : float, optional, default = 100
        ITERATIVE ONLY. Specifies the fill ratio upper bound (>=1) of the iLU
        preconditioner. Lower values save memory at the cost of longer
        execution times and a possible singular factorization.
    drop_tol : float, optional, default = 1e-4
        ITERATIVE ONLY. Sets the threshold for the magnitude of preconditioner
        elements that should be dropped. Can be reduced for a courser
        factorization at the cost of an increased number of iterations, and a
        possible singular factorization.
    diag_pivot_thresh : float, optional, default = None
        ITERATIVE ONLY. Sets the threshold between [0,1] for which diagonal
        elements are considered acceptable pivot points when using a
        preconditioner. A value of zero forces the pivot to be the diagonal
        element.
    ILU_MILU : str, optional, default = 'smilu_2'
        ITERATIVE ONLY. Selects the incomplete LU decomposition method
        algoithm used in creating the preconditoner. Should only be used by
        advanced users.
    Returns
    -------
    dm : qobj
        Steady state density matrix.
    info : dict, optional
        Dictionary containing solver-specific information about the solution.
    Notes
    -----
    The SVD method works only for dense operators (i.e. small systems).
    """
    # BUG FIX: avoid a mutable `[]` default argument (shared between all
    # calls); normalize the None sentinel here instead.
    if c_op_list is None:
        c_op_list = []
    # Auto-select the solver backend when not specified by the user.
    if solver is None:
        solver = "scipy"
        if settings.has_mkl:
            if method in ["direct", "power"]:
                solver = "mkl"
    elif solver == "mkl" and (method not in ["direct", "power"]):
        raise Exception("MKL solver only for direct or power methods.")
    elif solver not in ["scipy", "mkl"]:
        raise Exception("Invalid solver kwarg.")
    if solver == "scipy":
        ss_args = _default_steadystate_args()
    elif solver == "mkl":
        ss_args = _mkl_steadystate_args()
    else:
        raise Exception("Invalid solver keyword argument.")
    ss_args["method"] = method
    ss_args["info"]["solver"] = ss_args["solver"]
    ss_args["info"]["method"] = ss_args["method"]
    # Merge user kwargs into the defaults, rejecting unknown options.
    for key in kwargs:
        if key in ss_args:
            ss_args[key] = kwargs[key]
        else:
            raise Exception(
                "Invalid keyword argument '" + key + "' passed to steadystate."
            )
    # Set column perm to NATURAL if using RCM and not specified by user
    if ss_args["use_rcm"] and ("permc_spec" not in kwargs):
        ss_args["permc_spec"] = "NATURAL"
    # Create & check Liouvillian
    A = _steadystate_setup(A, c_op_list)
    # Set weight parameter to avg abs val in L if not set explicitly
    if "weight" not in kwargs:
        ss_args["weight"] = np.mean(np.abs(A.data.data.max()))
        ss_args["info"]["weight"] = ss_args["weight"]
    # Dispatch to the solver implementation matching the chosen method.
    if ss_args["method"] == "direct":
        if (ss_args["solver"] == "scipy" and ss_args["sparse"]) or ss_args[
            "solver"
        ] == "mkl":
            return _steadystate_direct_sparse(A, ss_args)
        else:
            return _steadystate_direct_dense(A, ss_args)
    elif ss_args["method"] == "eigen":
        return _steadystate_eigen(A, ss_args)
    elif ss_args["method"] in [
        "iterative-gmres",
        "iterative-lgmres",
        "iterative-bicgstab",
    ]:
        return _steadystate_iterative(A, ss_args)
    elif ss_args["method"] == "svd":
        return _steadystate_svd_dense(A, ss_args)
    elif ss_args["method"] in [
        "power",
        "power-gmres",
        "power-lgmres",
        "power-bicgstab",
    ]:
        return _steadystate_power(A, ss_args)
    else:
        raise ValueError("Invalid method argument for steadystate.")
|
def steadystate(A, c_op_list=[], method="direct", solver=None, **kwargs):
    """Calculates the steady state for quantum evolution subject to the
    supplied Hamiltonian or Liouvillian operator and (if given a Hamiltonian) a
    list of collapse operators.

    If the user passes a Hamiltonian then it, along with the list of collapse
    operators, will be converted into a Liouvillian operator in Lindblad form.

    Parameters
    ----------
    A : qobj
        A Hamiltonian or Liouvillian operator.
    c_op_list : list
        A list of collapse operators.
    solver : str {None, 'scipy', 'mkl'}
        Selects the sparse solver to use.  Default is auto-select
        based on the availability of the MKL library.
    method : str {'direct', 'eigen', 'iterative-gmres',
                  'iterative-lgmres', 'iterative-bicgstab', 'svd', 'power',
                  'power-gmres', 'power-lgmres', 'power-bicgstab'}
        Method for solving the underlying linear equation. Direct LU solver
        'direct' (default), sparse eigenvalue problem 'eigen',
        iterative GMRES method 'iterative-gmres', iterative LGMRES method
        'iterative-lgmres', iterative BICGSTAB method 'iterative-bicgstab',
        SVD 'svd' (dense), or inverse-power method 'power'. The iterative
        power methods 'power-gmres', 'power-lgmres', 'power-bicgstab' use
        the same solvers as their direct counterparts.
    return_info : bool, optional, default = False
        Return a dictionary of solver-specific information about the
        solution and how it was obtained.
    sparse : bool, optional, default = True
        Solve for the steady state using sparse algorithms. If set to False,
        the underlying Liouvillian operator will be converted into a dense
        matrix. Use only for 'smaller' systems.
    use_rcm : bool, optional, default = False
        Use reverse Cuthill-Mckee reordering to minimize fill-in in the
        LU factorization of the Liouvillian.
    use_wbm : bool, optional, default = False
        Use Weighted Bipartite Matching reordering to make the Liouvillian
        diagonally dominant. This is useful for iterative preconditioners
        only, and is set to ``True`` by default when finding a preconditioner.
    weight : float, optional
        Sets the size of the elements used for adding the unity trace
        condition to the linear solvers.  This is set to the average abs value
        of the Liouvillian elements if not specified by the user.
    max_iter_refine : int {10}
        MKL ONLY. Max. number of iterative refinements to perform.
    scaling_vectors : bool {True, False}
        MKL ONLY. Scale matrix to unit norm columns and rows.
    weighted_matching : bool {True, False}
        MKL ONLY. Use weighted matching to better condition diagonal.
    x0 : ndarray, optional
        ITERATIVE ONLY. Initial guess for solution vector.
    maxiter : int, optional, default=1000
        ITERATIVE ONLY. Maximum number of iterations to perform.
    tol : float, optional, default=1e-12
        ITERATIVE ONLY. Tolerance used for terminating solver.
    permc_spec : str, optional, default='COLAMD'
        ITERATIVE ONLY. Column ordering used internally by superLU for the
        'direct' LU decomposition method. Options include 'COLAMD' and
        'NATURAL'. If using RCM then this is set to 'NATURAL' automatically
        unless explicitly specified.
    use_precond : bool optional, default = False
        ITERATIVE ONLY. Use an incomplete sparse LU decomposition as a
        preconditioner for the 'iterative' GMRES and BICG solvers.
        Speeds up convergence time by orders of magnitude in many cases.
    M : {sparse matrix, dense matrix, LinearOperator}, optional
        ITERATIVE ONLY. Preconditioner for A. The preconditioner should
        approximate the inverse of A. Effective preconditioning can
        dramatically improve the rate of convergence for iterative methods.
        If no preconditioner is given and ``use_precond = True``, then one
        is generated automatically.
    fill_factor : float, optional, default = 100
        ITERATIVE ONLY. Specifies the fill ratio upper bound (>=1) of the iLU
        preconditioner. Lower values save memory at the cost of longer
        execution times and a possible singular factorization.
    drop_tol : float, optional, default = 1e-4
        ITERATIVE ONLY. Sets the threshold for the magnitude of preconditioner
        elements that should be dropped. Can be reduced for a coarser
        factorization at the cost of an increased number of iterations, and a
        possible singular factorization.
    diag_pivot_thresh : float, optional, default = None
        ITERATIVE ONLY. Sets the threshold between [0,1] for which diagonal
        elements are considered acceptable pivot points when using a
        preconditioner. A value of zero forces the pivot to be the diagonal
        element.
    ILU_MILU : str, optional, default = 'smilu_2'
        ITERATIVE ONLY. Selects the incomplete LU decomposition method
        algorithm used in creating the preconditioner. Should only be used by
        advanced users.

    Returns
    -------
    dm : qobj
        Steady state density matrix.
    info : dict, optional
        Dictionary containing solver-specific information about the solution.

    Notes
    -----
    The SVD method works only for dense operators (i.e. small systems).
    """
    # Auto-select the sparse backend: prefer MKL for the LU-based methods
    # ('direct' / 'power') when it is available, otherwise fall back to scipy.
    if solver is None:
        solver = "scipy"
        if settings.has_mkl:
            if method in ["direct", "power"]:
                solver = "mkl"
    elif solver == "mkl" and (method not in ["direct", "power"]):
        raise Exception("MKL solver only for direct or power methods.")
    elif solver not in ["scipy", "mkl"]:
        raise Exception("Invalid solver kwarg.")
    # Start from the backend's default option dict, then overlay user kwargs.
    if solver == "scipy":
        ss_args = _default_steadystate_args()
    elif solver == "mkl":
        ss_args = _mkl_steadystate_args()
    else:
        raise Exception("Invalid solver keyword argument.")
    ss_args["method"] = method
    ss_args["info"]["solver"] = ss_args["solver"]
    ss_args["info"]["method"] = ss_args["method"]
    # Reject any kwarg the selected backend does not know about.
    for key in kwargs.keys():
        if key in ss_args.keys():
            ss_args[key] = kwargs[key]
        else:
            raise Exception(
                "Invalid keyword argument '" + key + "' passed to steadystate."
            )
    # Set column perm to NATURAL if using RCM and not specified by user
    if ss_args["use_rcm"] and ("permc_spec" not in kwargs.keys()):
        ss_args["permc_spec"] = "NATURAL"
    # Create & check Liouvillian
    A = _steadystate_setup(A, c_op_list)
    # Set weight parameter to avg abs val in L if not set explicitly
    if "weight" not in kwargs.keys():
        ss_args["weight"] = np.mean(np.abs(A.data.data.max()))
        ss_args["info"]["weight"] = ss_args["weight"]
    # Dispatch to the requested solver routine.
    if ss_args["method"] == "direct":
        # Dense fallback only applies to the scipy backend with sparse=False.
        if (ss_args["solver"] == "scipy" and ss_args["sparse"]) or ss_args[
            "solver"
        ] == "mkl":
            return _steadystate_direct_sparse(A, ss_args)
        else:
            return _steadystate_direct_dense(A, ss_args)
    elif ss_args["method"] == "eigen":
        return _steadystate_eigen(A, ss_args)
    elif ss_args["method"] in [
        "iterative-gmres",
        "iterative-lgmres",
        "iterative-bicgstab",
    ]:
        return _steadystate_iterative(A, ss_args)
    elif ss_args["method"] == "svd":
        return _steadystate_svd_dense(A, ss_args)
    elif ss_args["method"] in [
        "power",
        "power-gmres",
        "power-lgmres",
        "power-bicgstab",
    ]:
        return _steadystate_power(A, ss_args)
    else:
        raise ValueError("Invalid method argument for steadystate.")
|
https://github.com/qutip/qutip/issues/862
|
..................................................
======================================================================
ERROR: Steady state: Thermal qubit - power-gmres solver
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/Users/shahnawaz/dev/qutip/qutip/tests/test_steadystate.py", line 145, in test_qubit_power_gmres
rho_ss = steadystate(H, c_op_list, method='power-gmres')
File "/Users/shahnawaz/dev/qutip/qutip/steadystate.py", line 298, in steadystate
return _steadystate_power(A, ss_args)
File "/Users/shahnawaz/dev/qutip/qutip/steadystate.py", line 863, in _steadystate_power
v = v / la.norm(v, np.inf)
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/scipy/linalg/misc.py", line 137, in norm
a = np.asarray_chkfinite(a)
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/numpy/lib/function_base.py", line 1233, in asarray_chkfinite
"array must not contain infs or NaNs")
ValueError: array must not contain infs or NaNs
----------------------------------------------------------------------
Ran 561 tests in 576.175s
FAILED (SKIP=19, errors=1)
|
ValueError
|
def _steadystate_direct_sparse(L, ss_args):
    """
    Direct solver that uses scipy sparse matrices
    """
    if settings.debug:
        logger.debug("Starting direct LU solver.")

    dims = L.dims[0]
    n = int(np.sqrt(L.shape[0]))
    # Right-hand side enforcing the unit-trace condition: only the first
    # entry carries the (user- or auto-chosen) weight.
    rhs = np.zeros(n**2, dtype=complex)
    rhs[0] = ss_args["weight"]

    has_mkl = 1 if ss_args["solver"] == "mkl" else 0
    (L, perm, perm2, rev_perm, ss_args) = _steadystate_LU_liouvillian(
        L, ss_args, has_mkl
    )
    # Apply the same reorderings (WBM row perm, then RCM) to the RHS.
    for reorder in (perm, perm2):
        if np.any(reorder):
            rhs = rhs[np.ix_(reorder,)]

    if ss_args["solver"] == "scipy":
        # Record the SuperLU tuning options actually used.
        for opt in ("permc_spec", "drop_tol", "diag_pivot_thresh",
                    "fill_factor", "ILU_MILU"):
            ss_args["info"][opt] = ss_args[opt]

    if ss_args["solver"] != "mkl":
        # SuperLU factorize-then-solve path.
        orig_nnz = L.nnz
        t_begin = time.time()
        lu = splu(
            L,
            permc_spec=ss_args["permc_spec"],
            diag_pivot_thresh=ss_args["diag_pivot_thresh"],
            options=dict(ILU_MILU=ss_args["ILU_MILU"]),
        )
        sol = lu.solve(rhs)
        t_finish = time.time()
        ss_args["info"]["solution_time"] = t_finish - t_begin
        if (settings.debug or ss_args["return_info"]) and _scipy_check:
            L_nnz = lu.L.nnz
            U_nnz = lu.U.nnz
            ss_args["info"]["l_nnz"] = L_nnz
            ss_args["info"]["u_nnz"] = U_nnz
            ss_args["info"]["lu_fill_factor"] = (L_nnz + U_nnz) / L.nnz
            if settings.debug:
                logger.debug("L NNZ: %i ; U NNZ: %i" % (L_nnz, U_nnz))
                logger.debug("Fill factor: %f" % ((L_nnz + U_nnz) / orig_nnz))
    else:  # MKL PARDISO path.
        in_perm = (
            np.arange(n**2, dtype=np.int32)
            if len(ss_args["info"]["perm"]) != 0
            else None
        )
        t_begin = time.time()
        sol = mkl_spsolve(
            L,
            rhs,
            perm=in_perm,
            verbose=ss_args["verbose"],
            max_iter_refine=ss_args["max_iter_refine"],
            scaling_vectors=ss_args["scaling_vectors"],
            weighted_matching=ss_args["weighted_matching"],
        )
        t_finish = time.time()
        ss_args["info"]["solution_time"] = t_finish - t_begin
        if ss_args["return_info"]:
            ss_args["info"]["residual_norm"] = la.norm(rhs - L * sol, np.inf)
            ss_args["info"]["max_iter_refine"] = ss_args["max_iter_refine"]
            ss_args["info"]["scaling_vectors"] = ss_args["scaling_vectors"]
            ss_args["info"]["weighted_matching"] = ss_args["weighted_matching"]

    # Undo the RCM ordering before reshaping into a density matrix.
    if ss_args["use_rcm"]:
        sol = sol[np.ix_(rev_perm,)]

    data = dense2D_to_fastcsr_fmode(vec2mat(sol), n, n)
    # Symmetrize to remove round-off asymmetry.
    data = 0.5 * (data + data.H)
    if ss_args["return_info"]:
        return Qobj(data, dims=dims, isherm=True), ss_args["info"]
    return Qobj(data, dims=dims, isherm=True)
|
def _steadystate_direct_sparse(L, ss_args):
    """
    Direct solver that uses scipy sparse matrices
    """
    if settings.debug:
        logger.debug("Starting direct LU solver.")
    dims = L.dims[0]
    n = int(np.sqrt(L.shape[0]))
    # RHS vector encoding the unit-trace constraint: only entry 0 is nonzero.
    b = np.zeros(n**2, dtype=complex)
    b[0] = ss_args["weight"]
    # MKL needs the Liouvillian assembled in CSR/0-based form; flag it here.
    if ss_args["solver"] == "mkl":
        has_mkl = 1
    else:
        has_mkl = 0
    L, perm, perm2, rev_perm, ss_args = _steadystate_LU_liouvillian(L, ss_args, has_mkl)
    # Apply the WBM (perm) and RCM (perm2) reorderings to the RHS as well,
    # so that it matches the permuted Liouvillian.
    if np.any(perm):
        b = b[
            np.ix_(
                perm,
            )
        ]
    if np.any(perm2):
        b = b[
            np.ix_(
                perm2,
            )
        ]
    if ss_args["solver"] == "scipy":
        # Record the SuperLU options actually used for this solve.
        ss_args["info"]["permc_spec"] = ss_args["permc_spec"]
        ss_args["info"]["drop_tol"] = ss_args["drop_tol"]
        ss_args["info"]["diag_pivot_thresh"] = ss_args["diag_pivot_thresh"]
        ss_args["info"]["fill_factor"] = ss_args["fill_factor"]
        ss_args["info"]["ILU_MILU"] = ss_args["ILU_MILU"]
    if not ss_args["solver"] == "mkl":
        # Use superLU solver
        orig_nnz = L.nnz
        _direct_start = time.time()
        lu = splu(
            L,
            permc_spec=ss_args["permc_spec"],
            diag_pivot_thresh=ss_args["diag_pivot_thresh"],
            options=dict(ILU_MILU=ss_args["ILU_MILU"]),
        )
        v = lu.solve(b)
        _direct_end = time.time()
        ss_args["info"]["solution_time"] = _direct_end - _direct_start
        if (settings.debug or ss_args["return_info"]) and _scipy_check:
            # Fill-in statistics of the LU factors (debug / info reporting).
            L_nnz = lu.L.nnz
            U_nnz = lu.U.nnz
            ss_args["info"]["l_nnz"] = L_nnz
            ss_args["info"]["u_nnz"] = U_nnz
            ss_args["info"]["lu_fill_factor"] = (L_nnz + U_nnz) / L.nnz
            if settings.debug:
                logger.debug("L NNZ: %i ; U NNZ: %i" % (L_nnz, U_nnz))
                logger.debug("Fill factor: %f" % ((L_nnz + U_nnz) / orig_nnz))
    else:  # Use MKL solver
        # PARDISO expects an explicit permutation array when one was applied.
        if len(ss_args["info"]["perm"]) != 0:
            in_perm = np.arange(n**2, dtype=np.int32)
        else:
            in_perm = None
        _direct_start = time.time()
        v = mkl_spsolve(
            L,
            b,
            perm=in_perm,
            verbose=ss_args["verbose"],
            max_iter_refine=ss_args["max_iter_refine"],
            scaling_vectors=ss_args["scaling_vectors"],
            weighted_matching=ss_args["weighted_matching"],
        )
        _direct_end = time.time()
        ss_args["info"]["solution_time"] = _direct_end - _direct_start
        if ss_args["return_info"]:
            ss_args["info"]["residual_norm"] = la.norm(b - L * v, np.inf)
            ss_args["info"]["max_iter_refine"] = ss_args["max_iter_refine"]
            ss_args["info"]["scaling_vectors"] = ss_args["scaling_vectors"]
            ss_args["info"]["weighted_matching"] = ss_args["weighted_matching"]
    # Undo the RCM reordering before reshaping the solution vector.
    if ss_args["use_rcm"]:
        v = v[
            np.ix_(
                rev_perm,
            )
        ]
    data = dense2D_to_fastcsr_fmode(vec2mat(v), n, n)
    # Symmetrize to suppress numerical asymmetry in the result.
    data = 0.5 * (data + data.H)
    if ss_args["return_info"]:
        return Qobj(data, dims=dims, isherm=True), ss_args["info"]
    else:
        return Qobj(data, dims=dims, isherm=True)
|
https://github.com/qutip/qutip/issues/862
|
..................................................
======================================================================
ERROR: Steady state: Thermal qubit - power-gmres solver
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/Users/shahnawaz/dev/qutip/qutip/tests/test_steadystate.py", line 145, in test_qubit_power_gmres
rho_ss = steadystate(H, c_op_list, method='power-gmres')
File "/Users/shahnawaz/dev/qutip/qutip/steadystate.py", line 298, in steadystate
return _steadystate_power(A, ss_args)
File "/Users/shahnawaz/dev/qutip/qutip/steadystate.py", line 863, in _steadystate_power
v = v / la.norm(v, np.inf)
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/scipy/linalg/misc.py", line 137, in norm
a = np.asarray_chkfinite(a)
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/numpy/lib/function_base.py", line 1233, in asarray_chkfinite
"array must not contain infs or NaNs")
ValueError: array must not contain infs or NaNs
----------------------------------------------------------------------
Ran 561 tests in 576.175s
FAILED (SKIP=19, errors=1)
|
ValueError
|
def _steadystate_iterative(L, ss_args):
    """
    Iterative steady state solver using the GMRES, LGMRES, or BICGSTAB
    algorithm and a sparse incomplete LU preconditioner.

    Solves ``L v = b`` where ``b`` encodes the unit-trace condition, then
    reshapes ``v`` into the steady-state density matrix.

    Fixes relative to the previous version:
    * a ``TypeError`` from the solver that is *not* the missing-``atol``
      signature problem is re-raised instead of being silently swallowed
      (which previously left ``v``/``check`` unbound and crashed later with
      a confusing ``NameError``);
    * the non-convergence error message no longer reads
      ``ss_args['info']['residual_norm']``, which is only populated when
      ``return_info`` is set (previously a ``KeyError``).
    """
    ss_iters = {"iter": 0}

    def _iter_count(r):
        # scipy callback: fires once per Krylov iteration.
        ss_iters["iter"] += 1

    if settings.debug:
        logger.debug("Starting %s solver." % ss_args["method"])
    dims = L.dims[0]
    n = int(np.sqrt(L.shape[0]))
    # RHS enforcing the trace condition.
    b = np.zeros(n**2)
    b[0] = ss_args["weight"]
    L, perm, perm2, rev_perm, ss_args = _steadystate_LU_liouvillian(L, ss_args)
    # Apply the WBM / RCM orderings to the RHS as well.
    if np.any(perm):
        b = b[np.ix_(perm,)]
    if np.any(perm2):
        b = b[np.ix_(perm2,)]
    use_solver(assumeSortedIndices=True)
    if ss_args["M"] is None and ss_args["use_precond"]:
        ss_args["M"], ss_args = _iterative_precondition(L, n, ss_args)
        if ss_args["M"] is None:
            warnings.warn("Preconditioning failed. Continuing without.", UserWarning)

    # Select iterative solver type; GMRES additionally takes 'restart'.
    if ss_args["method"] == "iterative-gmres":
        solver, extra = gmres, {"restart": ss_args["restart"]}
    elif ss_args["method"] == "iterative-lgmres":
        solver, extra = lgmres, {}
    elif ss_args["method"] == "iterative-bicgstab":
        solver, extra = bicgstab, {}
    else:
        raise Exception("Invalid iterative solver method.")

    common_kw = dict(
        tol=ss_args["tol"],
        M=ss_args["M"],
        x0=ss_args["x0"],
        maxiter=ss_args["maxiter"],
        callback=_iter_count,
        **extra,
    )
    _iter_start = time.time()
    # FIXME: the atol fallback can be removed once scipy 1.1 is a minimum
    # requirement.
    try:
        v, check = solver(L, b, atol=ss_args["matol"], **common_kw)
    except TypeError as e:
        if "unexpected keyword argument 'atol'" not in str(e):
            # Any other TypeError is a real bug; do not swallow it.
            raise
        # Older scipy has no 'atol' keyword: retry without it.
        v, check = solver(L, b, **common_kw)
    _iter_end = time.time()

    ss_args["info"]["iter_time"] = _iter_end - _iter_start
    if "precond_time" in ss_args["info"].keys():
        ss_args["info"]["solution_time"] = (
            ss_args["info"]["iter_time"] + ss_args["info"]["precond_time"]
        )
    else:
        ss_args["info"]["solution_time"] = ss_args["info"]["iter_time"]
    ss_args["info"]["iterations"] = ss_iters["iter"]
    # Residual of the (permuted) linear system.
    residual_norm = la.norm(b - L * v, np.inf)
    if ss_args["return_info"]:
        ss_args["info"]["residual_norm"] = residual_norm
    if settings.debug:
        logger.debug("Number of Iterations: %i" % ss_iters["iter"])
        logger.debug("Iteration. time: %f" % (_iter_end - _iter_start))

    if check > 0:
        raise Exception(
            "Steadystate error: Did not reach tolerance after "
            + str(ss_args["maxiter"])
            + " steps."
            + "\nResidual norm: "
            + str(residual_norm)
        )
    elif check < 0:
        raise Exception(
            "Steadystate error: Failed with fatal error: " + str(check) + "."
        )

    # Undo RCM reordering and reshape into a density matrix.
    if ss_args["use_rcm"]:
        v = v[np.ix_(rev_perm,)]
    data = vec2mat(v)
    data = 0.5 * (data + data.conj().T)
    if ss_args["return_info"]:
        return Qobj(data, dims=dims, isherm=True), ss_args["info"]
    else:
        return Qobj(data, dims=dims, isherm=True)
|
def _steadystate_iterative(L, ss_args):
    """
    Solve for the steady state with a scipy Krylov method (GMRES, LGMRES or
    BICGSTAB), optionally using a sparse incomplete-LU preconditioner.
    """
    progress = {"iter": 0}

    def _count_iteration(r):
        # Callback passed to scipy; increments once per solver iteration.
        progress["iter"] += 1

    if settings.debug:
        logger.debug("Starting %s solver." % ss_args["method"])

    dims = L.dims[0]
    n = int(np.sqrt(L.shape[0]))
    # RHS vector expressing the unit-trace constraint.
    rhs = np.zeros(n**2)
    rhs[0] = ss_args["weight"]

    L, perm, perm2, rev_perm, ss_args = _steadystate_LU_liouvillian(L, ss_args)
    # Permute the RHS consistently with the reordered Liouvillian.
    for ordering in (perm, perm2):
        if np.any(ordering):
            rhs = rhs[np.ix_(ordering,)]

    use_solver(assumeSortedIndices=True)
    if ss_args["M"] is None and ss_args["use_precond"]:
        ss_args["M"], ss_args = _iterative_precondition(L, n, ss_args)
        if ss_args["M"] is None:
            warnings.warn("Preconditioning failed. Continuing without.", UserWarning)

    # Arguments shared by all three Krylov solvers.
    shared = dict(
        tol=ss_args["tol"],
        M=ss_args["M"],
        x0=ss_args["x0"],
        maxiter=ss_args["maxiter"],
        callback=_count_iteration,
    )
    t_begin = time.time()
    chosen = ss_args["method"]
    if chosen == "iterative-gmres":
        v, check = gmres(L, rhs, restart=ss_args["restart"], **shared)
    elif chosen == "iterative-lgmres":
        v, check = lgmres(L, rhs, **shared)
    elif chosen == "iterative-bicgstab":
        v, check = bicgstab(L, rhs, **shared)
    else:
        raise Exception("Invalid iterative solver method.")
    t_finish = time.time()

    ss_args["info"]["iter_time"] = t_finish - t_begin
    if "precond_time" in ss_args["info"]:
        ss_args["info"]["solution_time"] = (
            ss_args["info"]["iter_time"] + ss_args["info"]["precond_time"]
        )
    else:
        ss_args["info"]["solution_time"] = ss_args["info"]["iter_time"]
    ss_args["info"]["iterations"] = progress["iter"]
    if ss_args["return_info"]:
        ss_args["info"]["residual_norm"] = la.norm(rhs - L * v, np.inf)

    if settings.debug:
        logger.debug("Number of Iterations: %i" % progress["iter"])
        logger.debug("Iteration. time: %f" % (t_finish - t_begin))

    if check > 0:
        raise Exception(
            "Steadystate error: Did not reach tolerance after "
            + str(ss_args["maxiter"])
            + " steps."
            + "\nResidual norm: "
            + str(ss_args["info"]["residual_norm"])
        )
    elif check < 0:
        raise Exception(
            "Steadystate error: Failed with fatal error: " + str(check) + "."
        )

    # Undo RCM reordering, reshape, and symmetrize.
    if ss_args["use_rcm"]:
        v = v[np.ix_(rev_perm,)]
    data = vec2mat(v)
    data = 0.5 * (data + data.conj().T)
    if ss_args["return_info"]:
        return Qobj(data, dims=dims, isherm=True), ss_args["info"]
    return Qobj(data, dims=dims, isherm=True)
|
https://github.com/qutip/qutip/issues/862
|
..................................................
======================================================================
ERROR: Steady state: Thermal qubit - power-gmres solver
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/Users/shahnawaz/dev/qutip/qutip/tests/test_steadystate.py", line 145, in test_qubit_power_gmres
rho_ss = steadystate(H, c_op_list, method='power-gmres')
File "/Users/shahnawaz/dev/qutip/qutip/steadystate.py", line 298, in steadystate
return _steadystate_power(A, ss_args)
File "/Users/shahnawaz/dev/qutip/qutip/steadystate.py", line 863, in _steadystate_power
v = v / la.norm(v, np.inf)
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/scipy/linalg/misc.py", line 137, in norm
a = np.asarray_chkfinite(a)
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/numpy/lib/function_base.py", line 1233, in asarray_chkfinite
"array must not contain infs or NaNs")
ValueError: array must not contain infs or NaNs
----------------------------------------------------------------------
Ran 561 tests in 576.175s
FAILED (SKIP=19, errors=1)
|
ValueError
|
def _steadystate_power(L, ss_args):
    """
    Inverse power method for steady state solving.

    Repeatedly solves ``L x_{k+1} = x_k`` — via a precomputed LU
    factorization for 'power', or a Krylov method for 'power-gmres',
    'power-lgmres', 'power-bicgstab' — renormalizing after each step, until
    ``||L v||_inf <= ss_args['tol']`` or ``maxiter`` outer iterations.

    Fixes relative to the previous version:
    * non-superoperator normalization used ``data`` before assignment
      (``data = data / la.norm(v)`` raised ``NameError``); it now correctly
      normalizes ``v``;
    * a ``TypeError`` from the inner solver that is not the missing-``atol``
      signature problem is re-raised instead of silently swallowed, which
      previously left ``v`` unchanged and ``check == 0``, looping forever.
    """
    ss_args["info"].pop("weight", None)
    if settings.debug:
        logger.debug("Starting iterative inverse-power method solver.")
    tol = ss_args["tol"]
    mtol = ss_args["mtol"]
    if mtol is None:
        # Inner (Krylov) tolerance defaults to a tenth of the outer one,
        # bounded away from zero.
        mtol = max(0.1 * tol, 1e-15)
    maxiter = ss_args["maxiter"]

    use_solver(assumeSortedIndices=True)
    rhoss = Qobj()
    sflag = issuper(L)
    if sflag:
        rhoss.dims = L.dims[0]
    else:
        rhoss.dims = [L.dims[0], 1]
    n = L.shape[0]

    # Build Liouvillian (shifted / permuted for the power iteration).
    if ss_args["solver"] == "mkl" and ss_args["method"] == "power":
        has_mkl = 1
    else:
        has_mkl = 0
    L, perm, perm2, rev_perm, ss_args = _steadystate_power_liouvillian(
        L, ss_args, has_mkl
    )
    orig_nnz = L.nnz
    # start with all ones as RHS
    v = np.ones(n, dtype=complex)
    if ss_args["use_rcm"]:
        v = v[np.ix_(perm2,)]

    # Do preconditioning (scipy Krylov variants only).
    if ss_args["solver"] == "scipy":
        if (
            ss_args["M"] is None
            and ss_args["use_precond"]
            and ss_args["method"] in ["power-gmres", "power-lgmres", "power-bicgstab"]
        ):
            ss_args["M"], ss_args = _iterative_precondition(L, int(np.sqrt(n)), ss_args)
            if ss_args["M"] is None:
                warnings.warn(
                    "Preconditioning failed. Continuing without.", UserWarning
                )

    ss_iters = {"iter": 0}

    def _iter_count(r):
        # scipy callback; counts inner Krylov iterations.
        ss_iters["iter"] += 1

    def _krylov_solve(solver, rhs, **extra):
        # One inner solve with the scipy-version-dependent 'atol' handling.
        # FIXME: drop the fallback once scipy 1.1 is a minimum requirement.
        kw = dict(
            tol=mtol,
            M=ss_args["M"],
            x0=ss_args["x0"],
            maxiter=ss_args["maxiter"],
            callback=_iter_count,
            **extra,
        )
        try:
            return solver(L, rhs, atol=ss_args["matol"], **kw)
        except TypeError as e:
            if "unexpected keyword argument 'atol'" not in str(e):
                raise  # genuine error — must not be swallowed
            return solver(L, rhs, **kw)

    _power_start = time.time()
    # Get LU factors (plain power iteration only).
    if ss_args["method"] == "power":
        if ss_args["solver"] == "mkl":
            lu = mkl_splu(
                L,
                max_iter_refine=ss_args["max_iter_refine"],
                scaling_vectors=ss_args["scaling_vectors"],
                weighted_matching=ss_args["weighted_matching"],
            )
        else:
            lu = splu(
                L,
                permc_spec=ss_args["permc_spec"],
                diag_pivot_thresh=ss_args["diag_pivot_thresh"],
                options=dict(ILU_MILU=ss_args["ILU_MILU"]),
            )
            if settings.debug and _scipy_check:
                L_nnz = lu.L.nnz
                U_nnz = lu.U.nnz
                logger.debug("L NNZ: %i ; U NNZ: %i" % (L_nnz, U_nnz))
                logger.debug("Fill factor: %f" % ((L_nnz + U_nnz) / orig_nnz))

    it = 0
    while (la.norm(L * v, np.inf) > tol) and (it < maxiter):
        check = 0
        if ss_args["method"] == "power":
            v = lu.solve(v)
        elif ss_args["method"] == "power-gmres":
            v, check = _krylov_solve(gmres, v, restart=ss_args["restart"])
        elif ss_args["method"] == "power-lgmres":
            v, check = _krylov_solve(lgmres, v)
        elif ss_args["method"] == "power-bicgstab":
            v, check = _krylov_solve(bicgstab, v)
        else:
            raise Exception("Invalid iterative solver method.")
        # A failed inner solve returns a junk vector (possibly inf/NaN);
        # bail out before it poisons the normalization below.
        if check > 0:
            raise Exception(
                "{} failed to find solution in {} iterations.".format(
                    ss_args["method"], check
                )
            )
        if check < 0:
            raise Exception("Breakdown in {}".format(ss_args["method"]))
        v = v / la.norm(v, np.inf)
        it += 1

    if ss_args["method"] == "power" and ss_args["solver"] == "mkl":
        lu.delete()
        if ss_args["return_info"]:
            ss_args["info"]["max_iter_refine"] = ss_args["max_iter_refine"]
            ss_args["info"]["scaling_vectors"] = ss_args["scaling_vectors"]
            ss_args["info"]["weighted_matching"] = ss_args["weighted_matching"]

    if it >= maxiter:
        raise Exception(
            "Failed to find steady state after " + str(maxiter) + " iterations"
        )
    _power_end = time.time()
    ss_args["info"]["solution_time"] = _power_end - _power_start
    ss_args["info"]["iterations"] = it
    if ss_args["return_info"]:
        ss_args["info"]["residual_norm"] = la.norm(L * v, np.inf)
    if settings.debug:
        logger.debug("Number of iterations: %i" % it)

    if ss_args["use_rcm"]:
        v = v[np.ix_(rev_perm,)]

    # normalise according to type of problem
    if sflag:
        trow = v[:: rhoss.shape[0] + 1]
        data = v / np.sum(trow)
    else:
        # BUG FIX: previously 'data = data / la.norm(v)' used 'data' before
        # assignment (NameError); normalize the solution vector instead.
        data = v / la.norm(v)
    data = dense2D_to_fastcsr_fmode(vec2mat(data), rhoss.shape[0], rhoss.shape[0])
    rhoss.data = 0.5 * (data + data.H)
    rhoss.isherm = True
    if ss_args["return_info"]:
        return rhoss, ss_args["info"]
    else:
        return rhoss
|
def _steadystate_power(L, ss_args):
    """
    Inverse power method for steady state solving.

    Repeatedly solves ``L x_{k+1} = x_k`` (LU solve for 'power', a scipy
    Krylov method for 'power-gmres'/'power-lgmres'/'power-bicgstab') and
    renormalizes, until ``||L v||_inf <= ss_args['tol']`` or ``maxiter``
    outer iterations have elapsed.

    Fixes:
    * the inner solver's ``check`` return value was previously ignored; a
      failed/broken-down solve yields a junk vector (possibly inf/NaN) which
      then crashed ``la.norm(v, np.inf)`` with
      ``ValueError: array must not contain infs or NaNs`` — it is now
      validated and reported;
    * non-superoperator normalization used ``data`` before assignment
      (``data = data / la.norm(v)``, a ``NameError``); it now normalizes ``v``.
    """
    ss_args["info"].pop("weight", None)
    if settings.debug:
        logger.debug("Starting iterative inverse-power method solver.")
    tol = ss_args["tol"]
    maxiter = ss_args["maxiter"]
    use_solver(assumeSortedIndices=True)
    rhoss = Qobj()
    sflag = issuper(L)
    if sflag:
        rhoss.dims = L.dims[0]
    else:
        rhoss.dims = [L.dims[0], 1]
    n = L.shape[0]
    # Build Liouvillian
    if ss_args["solver"] == "mkl" and ss_args["method"] == "power":
        has_mkl = 1
    else:
        has_mkl = 0
    L, perm, perm2, rev_perm, ss_args = _steadystate_power_liouvillian(
        L, ss_args, has_mkl
    )
    orig_nnz = L.nnz
    # start with all ones as RHS
    v = np.ones(n, dtype=complex)
    if ss_args["use_rcm"]:
        v = v[np.ix_(perm2,)]
    # Do preconditioning (scipy Krylov variants only)
    if ss_args["solver"] == "scipy":
        if (
            ss_args["M"] is None
            and ss_args["use_precond"]
            and ss_args["method"] in ["power-gmres", "power-lgmres", "power-bicgstab"]
        ):
            ss_args["M"], ss_args = _iterative_precondition(L, int(np.sqrt(n)), ss_args)
            if ss_args["M"] is None:
                warnings.warn(
                    "Preconditioning failed. Continuing without.", UserWarning
                )

    ss_iters = {"iter": 0}

    def _iter_count(r):
        # scipy callback; counts inner Krylov iterations.
        ss_iters["iter"] += 1

    _power_start = time.time()
    # Get LU factors (plain power iteration only)
    if ss_args["method"] == "power":
        if ss_args["solver"] == "mkl":
            lu = mkl_splu(
                L,
                max_iter_refine=ss_args["max_iter_refine"],
                scaling_vectors=ss_args["scaling_vectors"],
                weighted_matching=ss_args["weighted_matching"],
            )
        else:
            lu = splu(
                L,
                permc_spec=ss_args["permc_spec"],
                diag_pivot_thresh=ss_args["diag_pivot_thresh"],
                options=dict(ILU_MILU=ss_args["ILU_MILU"]),
            )
            if settings.debug and _scipy_check:
                L_nnz = lu.L.nnz
                U_nnz = lu.U.nnz
                logger.debug("L NNZ: %i ; U NNZ: %i" % (L_nnz, U_nnz))
                logger.debug("Fill factor: %f" % ((L_nnz + U_nnz) / orig_nnz))

    it = 0
    _tol = max(ss_args["tol"] / 10, 1e-15)  # Should make this user accessible
    while (la.norm(L * v, np.inf) > tol) and (it < maxiter):
        check = 0
        if ss_args["method"] == "power":
            v = lu.solve(v)
        elif ss_args["method"] == "power-gmres":
            v, check = gmres(
                L,
                v,
                tol=_tol,
                M=ss_args["M"],
                x0=ss_args["x0"],
                restart=ss_args["restart"],
                maxiter=ss_args["maxiter"],
                callback=_iter_count,
            )
        elif ss_args["method"] == "power-lgmres":
            v, check = lgmres(
                L,
                v,
                tol=_tol,
                M=ss_args["M"],
                x0=ss_args["x0"],
                maxiter=ss_args["maxiter"],
                callback=_iter_count,
            )
        elif ss_args["method"] == "power-bicgstab":
            v, check = bicgstab(
                L,
                v,
                tol=_tol,
                M=ss_args["M"],
                x0=ss_args["x0"],
                maxiter=ss_args["maxiter"],
                callback=_iter_count,
            )
        else:
            raise Exception("Invalid iterative solver method.")
        # BUG FIX: validate the solver status before using 'v'; a failed
        # solve can hand back inf/NaN entries that crash la.norm below.
        if check > 0:
            raise Exception(
                "{} failed to find solution in {} iterations.".format(
                    ss_args["method"], check
                )
            )
        if check < 0:
            raise Exception("Breakdown in {}".format(ss_args["method"]))
        v = v / la.norm(v, np.inf)
        it += 1

    if ss_args["method"] == "power" and ss_args["solver"] == "mkl":
        lu.delete()
        if ss_args["return_info"]:
            ss_args["info"]["max_iter_refine"] = ss_args["max_iter_refine"]
            ss_args["info"]["scaling_vectors"] = ss_args["scaling_vectors"]
            ss_args["info"]["weighted_matching"] = ss_args["weighted_matching"]

    if it >= maxiter:
        raise Exception(
            "Failed to find steady state after " + str(maxiter) + " iterations"
        )
    _power_end = time.time()
    ss_args["info"]["solution_time"] = _power_end - _power_start
    ss_args["info"]["iterations"] = it
    if ss_args["return_info"]:
        ss_args["info"]["residual_norm"] = la.norm(L * v, np.inf)
    if settings.debug:
        logger.debug("Number of iterations: %i" % it)

    if ss_args["use_rcm"]:
        v = v[np.ix_(rev_perm,)]

    # normalise according to type of problem
    if sflag:
        trow = v[:: rhoss.shape[0] + 1]
        data = v / np.sum(trow)
    else:
        # BUG FIX: was 'data = data / la.norm(v)' — 'data' unbound here.
        data = v / la.norm(v)
    data = dense2D_to_fastcsr_fmode(vec2mat(data), rhoss.shape[0], rhoss.shape[0])
    rhoss.data = 0.5 * (data + data.H)
    rhoss.isherm = True
    if ss_args["return_info"]:
        return rhoss, ss_args["info"]
    else:
        return rhoss
|
https://github.com/qutip/qutip/issues/862
|
..................................................
======================================================================
ERROR: Steady state: Thermal qubit - power-gmres solver
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/Users/shahnawaz/dev/qutip/qutip/tests/test_steadystate.py", line 145, in test_qubit_power_gmres
rho_ss = steadystate(H, c_op_list, method='power-gmres')
File "/Users/shahnawaz/dev/qutip/qutip/steadystate.py", line 298, in steadystate
return _steadystate_power(A, ss_args)
File "/Users/shahnawaz/dev/qutip/qutip/steadystate.py", line 863, in _steadystate_power
v = v / la.norm(v, np.inf)
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/scipy/linalg/misc.py", line 137, in norm
a = np.asarray_chkfinite(a)
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/numpy/lib/function_base.py", line 1233, in asarray_chkfinite
"array must not contain infs or NaNs")
ValueError: array must not contain infs or NaNs
----------------------------------------------------------------------
Ran 561 tests in 576.175s
FAILED (SKIP=19, errors=1)
|
ValueError
|
def build_preconditioner(A, c_op_list=[], **kwargs):
    """Construct an iLU preconditioner for the iterative steady-state solvers.

    Builds the incomplete-LU factorization of the (possibly reordered)
    Liouvillian that the iterative linear solvers in ``steadystate`` use as
    a preconditioner.

    Parameters
    ----------
    A : qobj
        A Hamiltonian or Liouvillian operator.
    c_op_list : list
        A list of collapse operators.
    return_info : bool, optional, default = False
        Return a dictionary of solver-specific information about the
        solution and how it was obtained.
    use_rcm : bool, optional, default = False
        Use reverse Cuthill-Mckee reordering to minimize fill-in in the
        LU factorization of the Liouvillian.
    use_wbm : bool, optional, default = False
        Use Weighted Bipartite Matching reordering to make the Liouvillian
        diagonally dominant. Useful for iterative preconditioners only.
    weight : float, optional
        Size of the elements used for adding the unity trace condition to
        the linear solvers. Defaults to the average absolute value of the
        Liouvillian elements.
    method : str, default = 'iterative'
        Type of Liouvillian to build for iLU factorization: 'iterative'
        for direct iterative methods, 'power' for power iterative methods.
    permc_spec : str, optional, default='COLAMD'
        Column ordering used internally by superLU. Set to 'NATURAL'
        automatically when RCM is enabled, unless explicitly specified.
    fill_factor : float, optional, default = 100
        Fill ratio upper bound (>=1) of the iLU preconditioner.
    drop_tol : float, optional, default = 1e-4
        Threshold for dropping small preconditioner elements.
    diag_pivot_thresh : float, optional, default = None
        Threshold in [0, 1] for acceptable diagonal pivots; zero forces
        diagonal pivoting.
    ILU_MILU : str, optional, default = 'smilu_2'
        Incomplete LU decomposition algorithm. Advanced users only.

    Returns
    -------
    lu : object
        A SuperLU object representing the iLU preconditioner.
    info : dict, optional
        Dictionary containing solver-specific information
        (only when ``return_info`` is True).
    """
    ss_args = _default_steadystate_args()
    ss_args["method"] = "iterative"
    # Validate and absorb user options; unknown keys are a hard error.
    for key, value in kwargs.items():
        if key not in ss_args:
            raise Exception(
                "Invalid keyword argument '" + key + "' passed to steadystate."
            )
        ss_args[key] = value
    # RCM reordering works best with superLU's natural column ordering.
    if ss_args["use_rcm"] and ("permc_spec" not in kwargs):
        ss_args["permc_spec"] = "NATURAL"
    L = _steadystate_setup(A, c_op_list)
    # Default trace-condition weight: average |max| element of L.
    if "weight" not in kwargs:
        ss_args["weight"] = np.mean(np.abs(L.data.data.max()))
    ss_args["info"]["weight"] = ss_args["weight"]
    n = int(np.sqrt(L.shape[0]))
    method = ss_args["method"]
    if method == "iterative":
        L, perm, perm2, rev_perm, ss_args = _steadystate_LU_liouvillian(L, ss_args)
    elif method == "power":
        L, perm, perm2, rev_perm, ss_args = _steadystate_power_liouvillian(L, ss_args)
    else:
        raise Exception("Invalid preconditioning method.")
    M, ss_args = _iterative_precondition(L, n, ss_args)
    if ss_args["return_info"]:
        return M, ss_args["info"]
    return M
|
def build_preconditioner(A, c_op_list=[], **kwargs):
    """Constructs a iLU preconditioner necessary for solving for
    the steady state density matrix using the iterative linear solvers
    in the 'steadystate' function.
    Parameters
    ----------
    A : qobj
        A Hamiltonian or Liouvillian operator.
    c_op_list : list
        A list of collapse operators.
    return_info : bool, optional, default = False
        Return a dictionary of solver-specific information about the
        solution and how it was obtained.
    use_rcm : bool, optional, default = False
        Use reverse Cuthill-Mckee reordering to minimize fill-in in the
        LU factorization of the Liouvillian.
    use_wbm : bool, optional, default = False
        Use Weighted Bipartite Matching reordering to make the Liouvillian
        diagonally dominant. This is useful for iterative preconditioners
        only, and is set to ``True`` by default when finding a preconditioner.
    weight : float, optional
        Sets the size of the elements used for adding the unity trace condition
        to the linear solvers. This is set to the average abs value of the
        Liouvillian elements if not specified by the user.
    method : str, default = 'iterative'
        Tells the preconditioner what type of Liouvillian to build for
        iLU factorization. For direct iterative methods use 'iterative'.
        For power iterative methods use 'power'.
    permc_spec : str, optional, default='COLAMD'
        Column ordering used internally by superLU for the
        'direct' LU decomposition method. Options include 'COLAMD' and
        'NATURAL'. If using RCM then this is set to 'NATURAL' automatically
        unless explicitly specified.
    fill_factor : float, optional, default = 100
        Specifies the fill ratio upper bound (>=1) of the iLU
        preconditioner. Lower values save memory at the cost of longer
        execution times and a possible singular factorization.
    drop_tol : float, optional, default = 1e-4
        Sets the threshold for the magnitude of preconditioner
        elements that should be dropped. Can be reduced for a coarser
        factorization at the cost of an increased number of iterations, and a
        possible singular factorization.
    diag_pivot_thresh : float, optional, default = None
        Sets the threshold between [0,1] for which diagonal
        elements are considered acceptable pivot points when using a
        preconditioner. A value of zero forces the pivot to be the diagonal
        element.
    ILU_MILU : str, optional, default = 'smilu_2'
        Selects the incomplete LU decomposition method algorithm used in
        creating the preconditioner. Should only be used by advanced users.
    Returns
    -------
    lu : object
        Returns a SuperLU object representing iLU preconditioner.
    info : dict, optional
        Dictionary containing solver-specific information.
    """
    ss_args = _default_steadystate_args()
    ss_args["method"] = "iterative"
    # Absorb user options into the default argument dict; reject unknown keys.
    for key in kwargs.keys():
        if key in ss_args.keys():
            ss_args[key] = kwargs[key]
        else:
            raise Exception(
                "Invalid keyword argument '" + key + "' passed to steadystate."
            )
    # Set column perm to NATURAL if using RCM and not specified by user
    if ss_args["use_rcm"] and ("permc_spec" not in kwargs.keys()):
        ss_args["permc_spec"] = "NATURAL"
    L = _steadystate_setup(A, c_op_list)
    # Set weight parameter to avg abs val in L if not set explicitly
    if "weight" not in kwargs.keys():
        ss_args["weight"] = np.mean(np.abs(L.data.data.max()))
    ss_args["info"]["weight"] = ss_args["weight"]
    # Hilbert-space dimension: the Liouvillian acts on vectorized n x n matrices.
    n = int(np.sqrt(L.shape[0]))
    # Build the (possibly reordered) Liouvillian appropriate for the solver type.
    if ss_args["method"] == "iterative":
        L, perm, perm2, rev_perm, ss_args = _steadystate_LU_liouvillian(L, ss_args)
    elif ss_args["method"] == "power":
        L, perm, perm2, rev_perm, ss_args = _steadystate_power_liouvillian(L, ss_args)
    else:
        raise Exception("Invalid preconditioning method.")
    M, ss_args = _iterative_precondition(L, n, ss_args)
    if ss_args["return_info"]:
        return M, ss_args["info"]
    else:
        return M
|
https://github.com/qutip/qutip/issues/862
|
..................................................
======================================================================
ERROR: Steady state: Thermal qubit - power-gmres solver
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/Users/shahnawaz/dev/qutip/qutip/tests/test_steadystate.py", line 145, in test_qubit_power_gmres
rho_ss = steadystate(H, c_op_list, method='power-gmres')
File "/Users/shahnawaz/dev/qutip/qutip/steadystate.py", line 298, in steadystate
return _steadystate_power(A, ss_args)
File "/Users/shahnawaz/dev/qutip/qutip/steadystate.py", line 863, in _steadystate_power
v = v / la.norm(v, np.inf)
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/scipy/linalg/misc.py", line 137, in norm
a = np.asarray_chkfinite(a)
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/numpy/lib/function_base.py", line 1233, in asarray_chkfinite
"array must not contain infs or NaNs")
ValueError: array must not contain infs or NaNs
----------------------------------------------------------------------
Ran 561 tests in 576.175s
FAILED (SKIP=19, errors=1)
|
ValueError
|
def _pseudo_inverse_sparse(L, rhoss, w=None, **pseudo_args):
    """
    Internal function for computing the pseudo inverse of an Liouvillian using
    sparse matrix methods. See pseudo_inverse for details.

    Parameters
    ----------
    L : Qobj
        Liouvillian superoperator.
    rhoss : Qobj
        Steady-state density matrix of ``L``.
    w : float, optional
        Frequency at which to evaluate the pseudo inverse. ``None`` or 0 is
        replaced by a tiny imaginary shift so the linear system is
        non-singular.
    **pseudo_args
        Solver options (``method``, ``solver``, ``use_rcm``, ``permc_spec``,
        ``diag_pivot_thresh``, ``ILU_MILU``, ``fill_factor``, ``drop_tol``).
    """
    N = np.prod(L.dims[0][0])
    rhoss_vec = operator_to_vector(rhoss)
    tr_op = tensor([identity(n) for n in L.dims[0][0]])
    tr_op_vec = operator_to_vector(tr_op)
    # Projector onto the steady state (P) and its complement (Q).
    P = zcsr_kron(rhoss_vec.data, tr_op_vec.data.T)
    I = sp.eye(N * N, N * N, format="csr")
    Q = I - P
    # Shift L slightly off zero frequency so the factorization is non-singular.
    if w is None:
        L = 1.0j * (1e-15) * spre(tr_op) + L
    else:
        if w != 0.0:
            L = 1.0j * w * spre(tr_op) + L
        else:
            L = 1.0j * (1e-15) * spre(tr_op) + L
    if pseudo_args["use_rcm"]:
        perm = reverse_cuthill_mckee(L.data)
        A = sp_permute(L.data, perm, perm)
        Q = sp_permute(Q, perm, perm)
    else:
        # BUG FIX: this previously read ``ss_args["solver"]`` -- an undefined
        # name in this function -- raising NameError whenever use_rcm is False.
        if pseudo_args["solver"] == "scipy":
            A = L.data.tocsc()
            A.sort_indices()
    if pseudo_args["method"] == "splu":
        if settings.has_mkl:
            A = L.data.tocsr()
            A.sort_indices()
            LIQ = mkl_spsolve(A, Q.toarray())
        else:
            pspec = pseudo_args["permc_spec"]
            diag_p_thresh = pseudo_args["diag_pivot_thresh"]
            # BUG FIX: do NOT rebind ``pseudo_args`` to the ILU_MILU string;
            # the options dict is still needed below for the use_rcm lookup.
            ilu_milu = pseudo_args["ILU_MILU"]
            lu = sp.linalg.splu(
                A,
                permc_spec=pspec,
                diag_pivot_thresh=diag_p_thresh,
                options=dict(ILU_MILU=ilu_milu),
            )
            LIQ = lu.solve(Q.toarray())
    elif pseudo_args["method"] == "spilu":
        lu = sp.linalg.spilu(
            A,
            permc_spec=pseudo_args["permc_spec"],
            fill_factor=pseudo_args["fill_factor"],
            drop_tol=pseudo_args["drop_tol"],
        )
        LIQ = lu.solve(Q.toarray())
    else:
        # BUG FIX: ``method`` was an undefined name in this error message.
        raise ValueError("unsupported method '%s'" % pseudo_args["method"])
    # Project back onto the complement of the steady state.
    R = sp.csr_matrix(Q * LIQ)
    if pseudo_args["use_rcm"]:
        # Undo the RCM permutation applied above.
        rev_perm = np.argsort(perm)
        R = sp_permute(R, rev_perm, rev_perm, "csr")
    return Qobj(R, dims=L.dims)
|
def _pseudo_inverse_sparse(L, rhoss, w=None, **pseudo_args):
    """
    Internal function for computing the pseudo inverse of an Liouvillian using
    sparse matrix methods. See pseudo_inverse for details.

    Parameters
    ----------
    L : Qobj
        Liouvillian superoperator.
    rhoss : Qobj
        Steady-state density matrix of ``L``.
    w : float, optional
        Frequency at which to evaluate the pseudo inverse. ``None`` or 0 is
        replaced by a tiny imaginary shift so the linear system is
        non-singular.
    **pseudo_args
        Solver options (``method``, ``solver``, ``use_rcm``, ``permc_spec``,
        ``diag_pivot_thresh``, ``ILU_MILU``, ``fill_factor``, ``drop_tol``).
    """
    N = np.prod(L.dims[0][0])
    rhoss_vec = operator_to_vector(rhoss)
    tr_op = tensor([identity(n) for n in L.dims[0][0]])
    tr_op_vec = operator_to_vector(tr_op)
    # Projector onto the steady state (P) and its complement (Q).
    P = zcsr_kron(rhoss_vec.data, tr_op_vec.data.T)
    I = sp.eye(N * N, N * N, format="csr")
    Q = I - P
    # Shift L slightly off zero frequency so the factorization is non-singular.
    if w is None:
        L = 1.0j * (1e-15) * spre(tr_op) + L
    else:
        if w != 0.0:
            L = 1.0j * w * spre(tr_op) + L
        else:
            L = 1.0j * (1e-15) * spre(tr_op) + L
    if pseudo_args["use_rcm"]:
        perm = reverse_cuthill_mckee(L.data)
        A = sp_permute(L.data, perm, perm)
        Q = sp_permute(Q, perm, perm)
    else:
        # BUG FIX: this previously read ``ss_args["solver"]`` -- an undefined
        # name in this function -- raising NameError whenever use_rcm is False.
        if pseudo_args["solver"] == "scipy":
            A = L.data.tocsc()
            A.sort_indices()
    if pseudo_args["method"] == "splu":
        if settings.has_mkl:
            A = L.data.tocsr()
            A.sort_indices()
            LIQ = mkl_spsolve(A, Q.toarray())
        else:
            lu = sp.linalg.splu(
                A,
                permc_spec=pseudo_args["permc_spec"],
                diag_pivot_thresh=pseudo_args["diag_pivot_thresh"],
                options=dict(ILU_MILU=pseudo_args["ILU_MILU"]),
            )
            LIQ = lu.solve(Q.toarray())
    elif pseudo_args["method"] == "spilu":
        lu = sp.linalg.spilu(
            A,
            permc_spec=pseudo_args["permc_spec"],
            fill_factor=pseudo_args["fill_factor"],
            drop_tol=pseudo_args["drop_tol"],
        )
        LIQ = lu.solve(Q.toarray())
    else:
        # BUG FIX: ``method`` was an undefined name in this error message.
        raise ValueError("unsupported method '%s'" % pseudo_args["method"])
    # Project back onto the complement of the steady state.
    R = sp.csr_matrix(Q * LIQ)
    if pseudo_args["use_rcm"]:
        # Undo the RCM permutation applied above.
        rev_perm = np.argsort(perm)
        R = sp_permute(R, rev_perm, rev_perm, "csr")
    return Qobj(R, dims=L.dims)
|
https://github.com/qutip/qutip/issues/862
|
..................................................
======================================================================
ERROR: Steady state: Thermal qubit - power-gmres solver
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/Users/shahnawaz/dev/qutip/qutip/tests/test_steadystate.py", line 145, in test_qubit_power_gmres
rho_ss = steadystate(H, c_op_list, method='power-gmres')
File "/Users/shahnawaz/dev/qutip/qutip/steadystate.py", line 298, in steadystate
return _steadystate_power(A, ss_args)
File "/Users/shahnawaz/dev/qutip/qutip/steadystate.py", line 863, in _steadystate_power
v = v / la.norm(v, np.inf)
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/scipy/linalg/misc.py", line 137, in norm
a = np.asarray_chkfinite(a)
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/numpy/lib/function_base.py", line 1233, in asarray_chkfinite
"array must not contain infs or NaNs")
ValueError: array must not contain infs or NaNs
----------------------------------------------------------------------
Ran 561 tests in 576.175s
FAILED (SKIP=19, errors=1)
|
ValueError
|
def pseudo_inverse(L, rhoss=None, w=None, sparse=True, **kwargs):
    """
    Compute the pseudo inverse for a Liouvillian superoperator, optionally
    given its steady state density matrix (which will be computed if not
    given).

    Parameters
    ----------
    L : Qobj
        A Liouvillian superoperator for which to compute the pseudo inverse.
    rhoss : Qobj
        A steadystate density matrix as Qobj instance, for the Liouvillian
        superoperator L.
    w : double
        frequency at which to evaluate pseudo-inverse. Can be zero for dense
        systems and large sparse systems. Small sparse systems can fail for
        zero frequencies.
    sparse : bool
        Flag that indicate whether to use sparse or dense matrix methods when
        computing the pseudo inverse.
    method : string
        Name of method to use. For sparse=True, allowed values are 'spsolve',
        'splu' and 'spilu'. For sparse=False, allowed values are 'direct' and
        'numpy'.
    kwargs : dictionary
        Additional keyword arguments for setting parameters for solver methods.

    Returns
    -------
    R : Qobj
        Returns a Qobj instance representing the pseudo inverse of L.

    Note
    ----
    In general the inverse of a sparse matrix will be dense. If you
    are applying the inverse to a density matrix then it is better to
    cast the problem as an Ax=b type problem where the explicit calculation
    of the inverse is not required. See page 67 of "Electrons in
    nanostructures" C. Flindt, PhD Thesis available online:
    http://orbit.dtu.dk/fedora/objects/orbit:82314/datastreams/
    file_4732600/content
    Note also that the definition of the pseudo-inverse herein is different
    from numpys pinv() alone, as it includes pre and post projection onto
    the subspace defined by the projector Q.
    """
    pseudo_args = _default_steadystate_args()
    # Validate and absorb user options; unknown keys are a hard error.
    for key, value in kwargs.items():
        if key not in pseudo_args:
            raise Exception(
                "Invalid keyword argument '" + key + "' passed to pseudo_inverse."
            )
        pseudo_args[key] = value
    if "method" not in kwargs:
        pseudo_args["method"] = "splu"
    # RCM reordering works best with superLU's natural column ordering.
    if pseudo_args["use_rcm"] and "permc_spec" not in kwargs:
        pseudo_args["permc_spec"] = "NATURAL"
    if rhoss is None:
        rhoss = steadystate(L, **pseudo_args)
    if sparse:
        return _pseudo_inverse_sparse(L, rhoss, w=w, **pseudo_args)
    # The dense path has no 'splu' variant; map it to 'direct'.
    if pseudo_args["method"] == "splu":
        pseudo_args["method"] = "direct"
    return _pseudo_inverse_dense(L, rhoss, w=w, **pseudo_args)
|
def pseudo_inverse(L, rhoss=None, w=None, sparse=True, **kwargs):
    """
    Compute the pseudo inverse for a Liouvillian superoperator, optionally
    given its steady state density matrix (which will be computed if not given).
    Parameters
    ----------
    L : Qobj
        A Liouvillian superoperator for which to compute the pseudo inverse.
    rhoss : Qobj
        A steadystate density matrix as Qobj instance, for the Liouvillian
        superoperator L.
    w : double
        frequency at which to evaluate pseudo-inverse. Can be zero for dense systems
        and large sparse systems. Small sparse systems can fail for zero frequencies.
    sparse : bool
        Flag that indicate whether to use sparse or dense matrix methods when
        computing the pseudo inverse.
    method : string
        Name of method to use. For sparse=True, allowed values are 'spsolve',
        'splu' and 'spilu'. For sparse=False, allowed values are 'direct' and
        'numpy'.
    kwargs : dictionary
        Additional keyword arguments for setting parameters for solver methods.
    Returns
    -------
    R : Qobj
        Returns a Qobj instance representing the pseudo inverse of L.
    Note
    ----
    In general the inverse of a sparse matrix will be dense. If you
    are applying the inverse to a density matrix then it is better to
    cast the problem as an Ax=b type problem where the explicit calculation
    of the inverse is not required. See page 67 of "Electrons in nanostructures"
    C. Flindt, PhD Thesis available online:
    http://orbit.dtu.dk/fedora/objects/orbit:82314/datastreams/file_4732600/content
    Note also that the definition of the pseudo-inverse herein is different
    from numpys pinv() alone, as it includes pre and post projection onto
    the subspace defined by the projector Q.
    """
    pseudo_args = _default_steadystate_args()
    # Absorb user options into the default argument dict; reject unknown keys.
    for key in kwargs.keys():
        if key in pseudo_args.keys():
            pseudo_args[key] = kwargs[key]
        else:
            raise Exception(
                "Invalid keyword argument '" + key + "' passed to pseudo_inverse."
            )
    # Default to LU factorization unless the caller picked a method.
    if "method" not in kwargs.keys():
        pseudo_args["method"] = "splu"
    # Set column perm to NATURAL if using RCM and not specified by user
    if pseudo_args["use_rcm"] and ("permc_spec" not in kwargs.keys()):
        pseudo_args["permc_spec"] = "NATURAL"
    if rhoss is None:
        rhoss = steadystate(L, **pseudo_args)
    if sparse:
        return _pseudo_inverse_sparse(L, rhoss, w=w, **pseudo_args)
    else:
        # The dense path has no 'splu' variant; map it to 'direct'.
        pseudo_args["method"] = (
            pseudo_args["method"] if pseudo_args["method"] != "splu" else "direct"
        )
        return _pseudo_inverse_dense(L, rhoss, w=w, **pseudo_args)
|
https://github.com/qutip/qutip/issues/862
|
..................................................
======================================================================
ERROR: Steady state: Thermal qubit - power-gmres solver
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/Users/shahnawaz/dev/qutip/qutip/tests/test_steadystate.py", line 145, in test_qubit_power_gmres
rho_ss = steadystate(H, c_op_list, method='power-gmres')
File "/Users/shahnawaz/dev/qutip/qutip/steadystate.py", line 298, in steadystate
return _steadystate_power(A, ss_args)
File "/Users/shahnawaz/dev/qutip/qutip/steadystate.py", line 863, in _steadystate_power
v = v / la.norm(v, np.inf)
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/scipy/linalg/misc.py", line 137, in norm
a = np.asarray_chkfinite(a)
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/numpy/lib/function_base.py", line 1233, in asarray_chkfinite
"array must not contain infs or NaNs")
ValueError: array must not contain infs or NaNs
----------------------------------------------------------------------
Ran 561 tests in 576.175s
FAILED (SKIP=19, errors=1)
|
ValueError
|
def _rhs_rho_milstein_implicit(L, rho_t, t, A, dt, ddW, d1, d2, args):
    """
    Drift implicit Milstein (theta = 1/2, eta = 0)
    Wang, X., Gan, S., & Wang, D. (2012).
    A family of fully implicit Milstein methods for stiff stochastic
    differential equations with multiplicative noise.
    BIT Numerical Mathematics, 52(3), 741-772.

    Advances the vectorized density matrix ``rho_t`` by one implicit
    Milstein step; the implicit linear system is solved with BiCGSTAB.
    """
    dW = ddW[:, 0]
    A = A[0]
    # reusable operators and traces
    a = A[-1] * rho_t * (0.5 * dt)
    e0 = cy_expect_rho_vec(A[0], rho_t, 1)
    b = A[0] * rho_t - e0 * rho_t
    TrAb = cy_expect_rho_vec(A[0], b, 1)
    drho_t = b * dW[0]
    drho_t += a
    drho_t += (A[0] * b - TrAb * rho_t - e0 * b) * dW[1]  # Milstein term
    drho_t += rho_t
    # FIXME: This atol keyword except check can be removed once scipy 1.1
    # is a minimum requirement
    try:
        v, check = sp.linalg.bicgstab(
            A[-2], drho_t, x0=drho_t + a, tol=args["tol"], atol="legacy"
        )
    except TypeError as e:
        if "unexpected keyword argument 'atol'" in str(e):
            v, check = sp.linalg.bicgstab(A[-2], drho_t, x0=drho_t + a, tol=args["tol"])
        else:
            # BUG FIX: unrelated TypeErrors previously fell through, leaving
            # ``v`` unbound so ``return v`` raised NameError and masked the
            # real error -- re-raise instead.
            raise
    return v
|
def _rhs_rho_milstein_implicit(L, rho_t, t, A, dt, ddW, d1, d2, args):
    """
    Drift implicit Milstein (theta = 1/2, eta = 0)
    Wang, X., Gan, S., & Wang, D. (2012).
    A family of fully implicit Milstein methods for stiff stochastic
    differential equations with multiplicative noise.
    BIT Numerical Mathematics, 52(3), 741-772.

    Advances the vectorized density matrix ``rho_t`` by one implicit
    Milstein step; the implicit linear system is solved with BiCGSTAB.
    """
    dW = ddW[:, 0]
    A = A[0]
    # Half-step drift contribution and reusable expectation values.
    drift = A[-1] * rho_t * (0.5 * dt)
    e0 = cy_expect_rho_vec(A[0], rho_t, 1)
    diffusion = A[0] * rho_t - e0 * rho_t
    tr_diff = cy_expect_rho_vec(A[0], diffusion, 1)
    # Accumulate the explicit right-hand side term by term.
    rhs = diffusion * dW[0]
    rhs += drift
    rhs += (A[0] * diffusion - tr_diff * rho_t - e0 * diffusion) * dW[1]  # Milstein
    rhs += rho_t
    # Solve the implicit system, seeding with the explicit prediction.
    v, check = sp.linalg.bicgstab(A[-2], rhs, x0=rhs + drift, tol=args["tol"])
    return v
|
https://github.com/qutip/qutip/issues/862
|
..................................................
======================================================================
ERROR: Steady state: Thermal qubit - power-gmres solver
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/Users/shahnawaz/dev/qutip/qutip/tests/test_steadystate.py", line 145, in test_qubit_power_gmres
rho_ss = steadystate(H, c_op_list, method='power-gmres')
File "/Users/shahnawaz/dev/qutip/qutip/steadystate.py", line 298, in steadystate
return _steadystate_power(A, ss_args)
File "/Users/shahnawaz/dev/qutip/qutip/steadystate.py", line 863, in _steadystate_power
v = v / la.norm(v, np.inf)
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/scipy/linalg/misc.py", line 137, in norm
a = np.asarray_chkfinite(a)
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/numpy/lib/function_base.py", line 1233, in asarray_chkfinite
"array must not contain infs or NaNs")
ValueError: array must not contain infs or NaNs
----------------------------------------------------------------------
Ran 561 tests in 576.175s
FAILED (SKIP=19, errors=1)
|
ValueError
|
def _rhs_rho_taylor_15_implicit(L, rho_t, t, A, dt, ddW, d1, d2, args):
    """
    Drift implicit Taylor 1.5 (alpha = 1/2, beta = doesn't matter)
    Chapter 12.2 Eq. (2.18) in Numerical Solution of Stochastic Differential
    Equations by Peter E. Kloeden, Eckhard Platen.

    Advances the vectorized density matrix ``rho_t`` by one implicit
    Taylor-1.5 step; the implicit linear system is solved with BiCGSTAB.
    """
    dW = ddW[:, 0]
    A = A[0]
    # reusable operators and traces
    a = A[-1] * rho_t
    e0 = cy_expect_rho_vec(A[0], rho_t, 1)
    b = A[0] * rho_t - e0 * rho_t
    TrAb = cy_expect_rho_vec(A[0], b, 1)
    Lb = A[0] * b - TrAb * rho_t - e0 * b
    TrALb = cy_expect_rho_vec(A[0], Lb, 1)
    TrAa = cy_expect_rho_vec(A[0], a, 1)
    drho_t = b * dW[0]
    drho_t += Lb * dW[1]  # Milstein term
    xx0 = (
        drho_t + a * dt
    ) + rho_t  # starting vector for the linear solver (Milstein prediction)
    drho_t += (0.5 * dt) * a
    # new terms:
    drho_t += A[-1] * b * (dW[2] - 0.5 * dW[0] * dt)
    drho_t += (A[0] * a - TrAa * rho_t - e0 * a - TrAb * b) * dW[3]
    drho_t += (A[0] * Lb - TrALb * rho_t - (2 * TrAb) * b - e0 * Lb) * dW[4]
    drho_t += rho_t
    # FIXME: This atol keyword except check can be removed once scipy 1.1
    # is a minimum requirement
    try:
        v, check = sp.linalg.bicgstab(
            A[-2], drho_t, x0=xx0, tol=args["tol"], atol="legacy"
        )
    except TypeError as e:
        if "unexpected keyword argument 'atol'" in str(e):
            v, check = sp.linalg.bicgstab(A[-2], drho_t, x0=xx0, tol=args["tol"])
        else:
            # BUG FIX: unrelated TypeErrors previously fell through, leaving
            # ``v`` unbound so ``return v`` raised NameError and masked the
            # real error -- re-raise instead.
            raise
    return v
|
def _rhs_rho_taylor_15_implicit(L, rho_t, t, A, dt, ddW, d1, d2, args):
    """
    Drift implicit Taylor 1.5 (alpha = 1/2, beta = doesn't matter)
    Chapter 12.2 Eq. (2.18) in Numerical Solution of Stochastic Differential
    Equations by Peter E. Kloeden, Eckhard Platen.

    Advances the vectorized density matrix ``rho_t`` by one implicit
    Taylor-1.5 step; the implicit part is solved with BiCGSTAB.
    """
    # First column of the pre-generated Wiener increments / multiple integrals.
    dW = ddW[:, 0]
    A = A[0]
    # reusable operators and traces
    a = A[-1] * rho_t
    e0 = cy_expect_rho_vec(A[0], rho_t, 1)
    b = A[0] * rho_t - e0 * rho_t
    TrAb = cy_expect_rho_vec(A[0], b, 1)
    Lb = A[0] * b - TrAb * rho_t - e0 * b
    TrALb = cy_expect_rho_vec(A[0], Lb, 1)
    TrAa = cy_expect_rho_vec(A[0], a, 1)
    # Accumulate the explicit right-hand side; the term order below matters
    # because xx0 snapshots the partial sum for the solver's starting guess.
    drho_t = b * dW[0]
    drho_t += Lb * dW[1]  # Milstein term
    xx0 = (
        drho_t + a * dt
    ) + rho_t  # starting vector for the linear solver (Milstein prediction)
    drho_t += (0.5 * dt) * a
    # new terms:
    drho_t += A[-1] * b * (dW[2] - 0.5 * dW[0] * dt)
    drho_t += (A[0] * a - TrAa * rho_t - e0 * a - TrAb * b) * dW[3]
    drho_t += (A[0] * Lb - TrALb * rho_t - (2 * TrAb) * b - e0 * Lb) * dW[4]
    drho_t += rho_t
    # Solve the implicit system A[-2] v = drho_t with the Milstein prediction
    # as the initial guess.
    v, check = sp.linalg.bicgstab(A[-2], drho_t, x0=xx0, tol=args["tol"])
    return v
|
https://github.com/qutip/qutip/issues/862
|
..................................................
======================================================================
ERROR: Steady state: Thermal qubit - power-gmres solver
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/Users/shahnawaz/dev/qutip/qutip/tests/test_steadystate.py", line 145, in test_qubit_power_gmres
rho_ss = steadystate(H, c_op_list, method='power-gmres')
File "/Users/shahnawaz/dev/qutip/qutip/steadystate.py", line 298, in steadystate
return _steadystate_power(A, ss_args)
File "/Users/shahnawaz/dev/qutip/qutip/steadystate.py", line 863, in _steadystate_power
v = v / la.norm(v, np.inf)
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/scipy/linalg/misc.py", line 137, in norm
a = np.asarray_chkfinite(a)
File "/Users/shahnawaz/miniconda3/lib/python3.6/site-packages/numpy/lib/function_base.py", line 1233, in asarray_chkfinite
"array must not contain infs or NaNs")
ValueError: array must not contain infs or NaNs
----------------------------------------------------------------------
Ran 561 tests in 576.175s
FAILED (SKIP=19, errors=1)
|
ValueError
|
def _blas_info():
config = np.__config__
blas_info = config.blas_opt_info
_has_lib_key = "libraries" in blas_info.keys()
blas = None
if hasattr(config, "mkl_info") or (
_has_lib_key and any("mkl" in lib for lib in blas_info["libraries"])
):
blas = "INTEL MKL"
elif hasattr(config, "openblas_info") or (
_has_lib_key and any("openblas" in lib for lib in blas_info["libraries"])
):
blas = "OPENBLAS"
elif "extra_link_args" in blas_info.keys() and (
"-Wl,Accelerate" in blas_info["extra_link_args"]
):
blas = "Accelerate"
else:
blas = "Generic"
return blas
|
def _blas_info():
config = np.__config__
blas_info = config.blas_opt_info
blas = None
if hasattr(config, "mkl_info") or any(
"mkl" in lib for lib in blas_info["libraries"]
):
blas = "INTEL MKL"
elif hasattr(config, "openblas_info") or any(
"openblas" in lib for lib in blas_info["libraries"]
):
blas = "OPENBLAS"
elif "extra_link_args" in blas_info.keys() and (
"-Wl,Accelerate" in blas_info["extra_link_args"]
):
blas = "Accelerate"
else:
blas = "Generic"
return blas
|
https://github.com/qutip/qutip/issues/552
|
Python 3.5.2 (v3.5.2:4def2a2901a5, Jun 26 2016, 10:47:25)
[GCC 4.2.1 (Apple Inc. build 5666) (dot 3)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
import qutip
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/oliviadimatteo/tomo_test/qutip/qutip/__init__.py", line 174, in <module>
import qutip._mkl
File "/Users/oliviadimatteo/tomo_test/qutip/qutip/_mkl/__init__.py", line 3, in <module>
_set_mkl()
File "/Users/oliviadimatteo/tomo_test/qutip/qutip/_mkl/utilities.py", line 47, in _set_mkl
if _blas_info() == 'INTEL MKL':
File "/Users/oliviadimatteo/tomo_test/qutip/qutip/utilities.py", line 405, in _blas_info
if hasattr(config,'mkl_info') or any('mkl' in lib for lib in blas_info['libraries']):
KeyError: 'libraries'
|
KeyError
|
def __init__(
    self,
    inpt=None,
    dims=[[], []],
    shape=[],
    type=None,
    isherm=None,
    fast=False,
    superrep=None,
):
    """
    Qobj constructor.

    Parameters
    ----------
    inpt : Qobj, list, tuple, ndarray, sparse matrix, or scalar, optional
        Data for the quantum object; ``None`` creates an empty (all-zero)
        object of the requested dimensions/shape.
    dims : list, optional
        Tensor-component dimensions as ``[[row dims], [col dims]]``;
        inferred from ``inpt`` when not given.
    type : str, optional
        If ``"super"``, promotes an operator's dims to superoperator form.
        NOTE: this parameter shadows the ``type`` builtin inside this method.
    isherm : bool, optional
        Known hermiticity of the data; computed automatically when ``None``
        and ``qset.auto_herm`` is enabled.
    fast : str or bool, optional
        ``"mc"`` / ``"mc-dm"`` bypass all validation for fast construction
        of ket / density-matrix output inside mcsolve.
    superrep : str, optional
        Superoperator representation label.
    """
    # Lazily-computed caches; set below or computed on first property access.
    self._isherm = None
    self._type = None
    self.superrep = None
    if fast == "mc":
        # fast Qobj construction for use in mcsolve with ket output
        self.data = sp.csr_matrix(inpt, dtype=complex)
        self.dims = dims
        self.shape = shape
        self._isherm = False
        return
    if fast == "mc-dm":
        # fast Qobj construction for use in mcsolve with dm output
        self.data = sp.csr_matrix(inpt, dtype=complex)
        self.dims = dims
        self.shape = shape
        self._isherm = True
        return
    if isinstance(inpt, Qobj):
        # if input is already Qobj then return identical copy
        # make sure matrix is sparse (safety check)
        self.data = sp.csr_matrix(inpt.data, dtype=complex)
        if not np.any(dims):
            # Dimensions of quantum object used for keeping track of tensor
            # components
            self.dims = inpt.dims
        else:
            self.dims = dims
        if not np.any(shape):
            # Shape of underlying quantum object data matrix
            self.shape = inpt.shape
        else:
            self.shape = shape
        # Carry over the source object's superoperator representation.
        self.superrep = inpt.superrep
    elif inpt is None:
        # initialize an empty Qobj with correct dimensions and shape
        if any(dims):
            N, M = np.prod(dims[0]), np.prod(dims[1])
            self.dims = dims
        elif shape:
            N, M = shape
            self.dims = [[N], [M]]
        else:
            N, M = 1, 1
            self.dims = [[N], [M]]
        self.shape = [N, M]
        self.data = sp.csr_matrix((N, M), dtype=complex)
    elif isinstance(inpt, list) or isinstance(inpt, tuple):
        # case where input is a list
        if len(np.array(inpt).shape) == 1:
            # if list has only one dimension (i.e [5,4]): treat as a column
            inpt = np.array([inpt]).transpose()
        else:  # if list has two dimensions (i.e [[5,4]])
            inpt = np.array(inpt)
        self.data = sp.csr_matrix(inpt, dtype=complex)
        if not np.any(dims):
            self.dims = [[int(inpt.shape[0])], [int(inpt.shape[1])]]
        else:
            self.dims = dims
        if not np.any(shape):
            self.shape = [int(inpt.shape[0]), int(inpt.shape[1])]
        else:
            self.shape = shape
    elif isinstance(inpt, np.ndarray) or sp.issparse(inpt):
        # case where input is array or sparse
        if inpt.ndim == 1:
            # promote 1-D arrays to column vectors
            inpt = inpt[:, np.newaxis]
        self.data = sp.csr_matrix(inpt, dtype=complex)
        if not np.any(dims):
            self.dims = [[int(inpt.shape[0])], [int(inpt.shape[1])]]
        else:
            self.dims = dims
        if not np.any(shape):
            self.shape = [int(inpt.shape[0]), int(inpt.shape[1])]
        else:
            self.shape = shape
    elif isinstance(inpt, (int, float, complex, np.int64)):
        # if input is int, float, or complex then convert to array
        self.data = sp.csr_matrix([[inpt]], dtype=complex)
        if not np.any(dims):
            self.dims = [[1], [1]]
        else:
            self.dims = dims
        if not np.any(shape):
            self.shape = [1, 1]
        else:
            self.shape = shape
    else:
        # Unsupported input type: fall back to a 1x1 zero object.
        warnings.warn("Initializing Qobj from unsupported type")
        inpt = np.array([[0]])
        self.data = sp.csr_matrix(inpt, dtype=complex)
        self.dims = [[int(inpt.shape[0])], [int(inpt.shape[1])]]
        self.shape = [int(inpt.shape[0]), int(inpt.shape[1])]
    # Signifies if quantum object corresponds to Hermitian operator
    if isherm is None:
        if qset.auto_herm:
            # Compute hermiticity now (the isherm property caches it).
            self._isherm = self.isherm
        else:
            self._isherm = None
    else:
        self._isherm = isherm
    if type == "super":
        # Promote operator dims [[a],[b]] to superoperator dims [[[a]],[[b]]].
        if self.type == "oper":
            self.dims = [[[d] for d in self.dims[0]], [[d] for d in self.dims[1]]]
    if superrep:
        self.superrep = superrep
    else:
        # Default representation for superoperators when none was given.
        if self.type == "super" and self.superrep is None:
            self.superrep = "super"
|
def __init__(
    self,
    inpt=None,
    dims=[[], []],
    shape=[],
    type=None,
    isherm=None,
    fast=False,
    superrep=None,
):
    """
    Qobj constructor.

    Builds the sparse data, dimension bookkeeping (`dims`), matrix
    `shape`, `type`, hermiticity flag and superoperator representation
    from any of: another Qobj, None (empty object), list/tuple,
    ndarray/sparse matrix, or a plain scalar.

    NOTE: the mutable defaults for ``dims``/``shape`` are only read when
    truthy (``np.any(dims)`` / ``bool(shape)``), so the shared default
    lists are never stored on an instance.  The ``type`` parameter
    shadows the ``type`` builtin inside this method.
    """
    self._isherm = None
    if fast == "mc":
        # fast Qobj construction for use in mcsolve with ket output
        # (trusts caller-supplied dims/shape; skips type checking)
        self.data = sp.csr_matrix(inpt, dtype=complex)
        self.dims = dims
        self.shape = shape
        self._isherm = False
        self.type = "ket"
        return
    if fast == "mc-dm":
        # fast Qobj construction for use in mcsolve with dm output
        self.data = sp.csr_matrix(inpt, dtype=complex)
        self.dims = dims
        self.shape = shape
        self._isherm = True
        self.type = "oper"
        return
    if isinstance(inpt, Qobj):
        # if input is already Qobj then return identical copy
        # make sure matrix is sparse (safety check)
        self.data = sp.csr_matrix(inpt.data, dtype=complex)
        if not np.any(dims):
            # Dimensions of quantum object used for keeping track of tensor
            # components
            self.dims = inpt.dims
        else:
            self.dims = dims
        if not np.any(shape):
            # Shape of underlying quantum object data matrix
            self.shape = inpt.shape
        else:
            self.shape = shape
    elif inpt is None:
        # initialize an empty Qobj with correct dimensions and shape
        if any(dims):
            N, M = np.prod(dims[0]), np.prod(dims[1])
            self.dims = dims
        elif shape:
            N, M = shape
            self.dims = [[N], [M]]
        else:
            N, M = 1, 1
            self.dims = [[N], [M]]
        self.shape = [N, M]
        self.data = sp.csr_matrix((N, M), dtype=complex)
    elif isinstance(inpt, list) or isinstance(inpt, tuple):
        # case where input is a list
        if len(np.array(inpt).shape) == 1:
            # if list has only one dimension (i.e [5,4]), treat it
            # as a column vector
            inpt = np.array([inpt]).transpose()
        else:  # if list has two dimensions (i.e [[5,4]])
            inpt = np.array(inpt)
        self.data = sp.csr_matrix(inpt, dtype=complex)
        if not np.any(dims):
            self.dims = [[int(inpt.shape[0])], [int(inpt.shape[1])]]
        else:
            self.dims = dims
        if not np.any(shape):
            self.shape = [int(inpt.shape[0]), int(inpt.shape[1])]
        else:
            self.shape = shape
    elif isinstance(inpt, np.ndarray) or sp.issparse(inpt):
        # case where input is array or sparse
        if inpt.ndim == 1:
            # promote 1-D input to a column vector
            inpt = inpt[:, np.newaxis]
        self.data = sp.csr_matrix(inpt, dtype=complex)
        if not np.any(dims):
            self.dims = [[int(inpt.shape[0])], [int(inpt.shape[1])]]
        else:
            self.dims = dims
        if not np.any(shape):
            self.shape = [int(inpt.shape[0]), int(inpt.shape[1])]
        else:
            self.shape = shape
    elif isinstance(inpt, (int, float, complex, np.int64)):
        # if input is int, float, or complex then convert to array
        self.data = sp.csr_matrix([[inpt]], dtype=complex)
        if not np.any(dims):
            self.dims = [[1], [1]]
        else:
            self.dims = dims
        if not np.any(shape):
            self.shape = [1, 1]
        else:
            self.shape = shape
    else:
        # unsupported input: warn and fall back to a 1x1 zero object
        warnings.warn("Initializing Qobj from unsupported type")
        inpt = np.array([[0]])
        self.data = sp.csr_matrix(inpt, dtype=complex)
        self.dims = [[int(inpt.shape[0])], [int(inpt.shape[1])]]
        self.shape = [int(inpt.shape[0]), int(inpt.shape[1])]
    # Signifies if quantum object corresponds to Hermitian operator
    if isherm is None:
        if qset.auto_herm:
            self._isherm = self.isherm
        else:
            self._isherm = None
    else:
        self._isherm = isherm
    # Signifies if quantum object corresponds to a ket, bra, operator, or
    # super-operator
    if type is None:
        self.type = _typecheck(self)
    else:
        self.type = type
    if self.type == "super":
        # superoperators default to the "super" representation
        self.superrep = superrep if superrep else "super"
    else:
        self.superrep = None
|
https://github.com/qutip/qutip/issues/96
|
rho_psi = operator_to_vector(Qobj(np.diag(np.array([0.9, 0.1], dtype=complex))))
E_psi = rho_psi.dag()
S = to_super(sigmax())
(E_psi * S) * rho_psi
Traceback (most recent call last):
File "<ipython-input-22-90cbfac2a43e>", line 1, in <module>
(E_psi * S) * rho_psi
File "qutip/qobj.py", line 416, in __mul__
raise TypeError("Incompatible Qobj shapes")
TypeError: Incompatible Qobj shapes
E_psi * (S * rho_psi)
Quantum object: dims = [[1], [1]], shape = [1, 1], type = oper, isherm = True
Qobj data =
[[ 0.18]]
E_psi
Quantum object: dims = [[1], [[2], [2]]], shape = [1, 4], type = bra
Qobj data =
[[ 0.9 0. 0. 0.1]]
rho_psi
Quantum object: dims = [[[2], [2]], [1]], shape = [4, 1], type = operator-vector
Qobj data =
[[ 0.9]
[ 0. ]
[ 0. ]
[ 0.1]]
S
Quantum object: dims = [[[2], [2]], [[2], [2]]], shape = [4, 4], type = super, isherm = True
Qobj data =
[[ 0. 0. 0. 1.]
[ 0. 0. 1. 0.]
[ 0. 1. 0. 0.]
[ 1. 0. 0. 0.]]
|
TypeError
|
def __add__(self, other):  # defines left addition for Qobj class
    """
    ADDITION with Qobj on LEFT [ ex. Qobj+4 ]

    A scalar (or 1x1 Qobj) added to an operator/superoperator is taken
    as a multiple of the identity; for other types it is added to the
    stored (nonzero) elements.  Two non-scalar Qobjs must have matching
    dims and shape.

    Raises
    ------
    TypeError
        If dims or shapes of the two objects are incompatible.
    """
    if _checkeseries(other) == "eseries":
        return other.__radd__(self)
    if not isinstance(other, Qobj):
        other = Qobj(other)
    if np.prod(other.shape) == 1 and np.prod(self.shape) != 1:
        # case for scalar quantum object
        dat = np.array(other.full())[0][0]
        if dat == 0:
            return self
        out = Qobj()
        if self.type in ["oper", "super"]:
            # scalar acts as a multiple of the identity
            out.data = self.data + dat * sp.identity(
                self.shape[0], dtype=complex, format="csr"
            )
        else:
            # bug fix: copy before mutating -- rebinding ``.data`` on the
            # aliased csr matrix would corrupt ``self.data`` as a side
            # effect of addition
            out.data = self.data.copy()
            # NOTE(review): this only shifts the *stored* nonzero
            # entries; explicit zeros are unchanged -- confirm intended
            out.data.data = out.data.data + dat
        out.dims = self.dims
        out.shape = self.shape
        if isinstance(dat, (int, float)):
            # real shift preserves hermiticity
            out._isherm = self._isherm
        else:
            out._isherm = out.isherm
        return out.tidyup() if qset.auto_tidyup else out
    elif np.prod(self.shape) == 1 and np.prod(other.shape) != 1:
        # case for scalar quantum object
        dat = np.array(self.full())[0][0]
        if dat == 0:
            return other
        out = Qobj()
        if other.type in ["oper", "super"]:
            out.data = (
                dat * sp.identity(other.shape[0], dtype=complex, format="csr")
                + other.data
            )
        else:
            # bug fix: copy before mutating (see the mirror branch above)
            out.data = other.data.copy()
            out.data.data = out.data.data + dat
        out.dims = other.dims
        out.shape = other.shape
        if isinstance(dat, (int, float)):
            out._isherm = self._isherm
        else:
            out._isherm = out.isherm
        return out.tidyup() if qset.auto_tidyup else out
    elif self.dims != other.dims:
        raise TypeError("Incompatible quantum object dimensions")
    elif self.shape != other.shape:
        raise TypeError("Matrix shapes do not match")
    else:  # case for matching quantum objects
        out = Qobj()
        out.data = self.data + other.data
        out.dims = self.dims
        out.shape = self.shape
        if self.type in ["ket", "bra", "super"]:
            out._isherm = False
        elif self._isherm and self._isherm == other._isherm:
            out._isherm = True
        elif self._isherm and not other._isherm:
            out._isherm = False
        elif not self._isherm and other._isherm:
            out._isherm = False
        else:
            out._isherm = out.isherm
        return out.tidyup() if qset.auto_tidyup else out
|
def __add__(self, other):  # defines left addition for Qobj class
    """
    ADDITION with Qobj on LEFT [ ex. Qobj+4 ]

    A scalar (or 1x1 Qobj) added to an operator/superoperator is taken
    as a multiple of the identity; for other types it is added to the
    stored (nonzero) elements.  Two non-scalar Qobjs must have matching
    dims and shape.

    Raises
    ------
    TypeError
        If dims or shapes of the two objects are incompatible.
    """
    if _checkeseries(other) == "eseries":
        return other.__radd__(self)
    if not isinstance(other, Qobj):
        other = Qobj(other)
    if np.prod(other.shape) == 1 and np.prod(self.shape) != 1:
        # case for scalar quantum object
        dat = np.array(other.full())[0][0]
        if dat == 0:
            return self
        out = Qobj(type=self.type)
        if self.type in ["oper", "super"]:
            # scalar acts as a multiple of the identity
            out.data = self.data + dat * sp.identity(
                self.shape[0], dtype=complex, format="csr"
            )
        else:
            # bug fix: copy before mutating -- rebinding ``.data`` on the
            # aliased csr matrix would corrupt ``self.data`` as a side
            # effect of addition
            out.data = self.data.copy()
            # NOTE(review): this only shifts the *stored* nonzero
            # entries; explicit zeros are unchanged -- confirm intended
            out.data.data = out.data.data + dat
        out.dims = self.dims
        out.shape = self.shape
        if isinstance(dat, (int, float)):
            # real shift preserves hermiticity
            out._isherm = self._isherm
        else:
            out._isherm = out.isherm
        return out.tidyup() if qset.auto_tidyup else out
    elif np.prod(self.shape) == 1 and np.prod(other.shape) != 1:
        # case for scalar quantum object
        dat = np.array(self.full())[0][0]
        if dat == 0:
            return other
        out = Qobj()
        if other.type in ["oper", "super"]:
            out.data = (
                dat * sp.identity(other.shape[0], dtype=complex, format="csr")
                + other.data
            )
        else:
            # bug fix: copy before mutating (see the mirror branch above)
            out.data = other.data.copy()
            out.data.data = out.data.data + dat
        out.dims = other.dims
        out.shape = other.shape
        if isinstance(dat, (int, float)):
            out._isherm = self._isherm
        else:
            out._isherm = out.isherm
        return out.tidyup() if qset.auto_tidyup else out
    elif self.dims != other.dims:
        raise TypeError("Incompatible quantum object dimensions")
    elif self.shape != other.shape:
        raise TypeError("Matrix shapes do not match")
    else:  # case for matching quantum objects
        out = Qobj(type=self.type)
        out.data = self.data + other.data
        out.dims = self.dims
        out.shape = self.shape
        if self.type in ["ket", "bra", "super"]:
            out._isherm = False
        elif self._isherm and self._isherm == other._isherm:
            out._isherm = True
        elif self._isherm and not other._isherm:
            out._isherm = False
        elif not self._isherm and other._isherm:
            out._isherm = False
        else:
            out._isherm = out.isherm
        return out.tidyup() if qset.auto_tidyup else out
|
https://github.com/qutip/qutip/issues/96
|
rho_psi = operator_to_vector(Qobj(np.diag(np.array([0.9, 0.1], dtype=complex))))
E_psi = rho_psi.dag()
S = to_super(sigmax())
(E_psi * S) * rho_psi
Traceback (most recent call last):
File "<ipython-input-22-90cbfac2a43e>", line 1, in <module>
(E_psi * S) * rho_psi
File "qutip/qobj.py", line 416, in __mul__
raise TypeError("Incompatible Qobj shapes")
TypeError: Incompatible Qobj shapes
E_psi * (S * rho_psi)
Quantum object: dims = [[1], [1]], shape = [1, 1], type = oper, isherm = True
Qobj data =
[[ 0.18]]
E_psi
Quantum object: dims = [[1], [[2], [2]]], shape = [1, 4], type = bra
Qobj data =
[[ 0.9 0. 0. 0.1]]
rho_psi
Quantum object: dims = [[[2], [2]], [1]], shape = [4, 1], type = operator-vector
Qobj data =
[[ 0.9]
[ 0. ]
[ 0. ]
[ 0.1]]
S
Quantum object: dims = [[[2], [2]], [[2], [2]]], shape = [4, 4], type = super, isherm = True
Qobj data =
[[ 0. 0. 0. 1.]
[ 0. 0. 1. 0.]
[ 0. 1. 0. 0.]
[ 1. 0. 0. 0.]]
|
TypeError
|
def __mul__(self, other):
    """
    MULTIPLICATION with Qobj on LEFT [ ex. Qobj*4 ]

    Handles Qobj*Qobj (matrix product with dimension bookkeeping),
    Qobj*scalar, and element-wise Qobj*list/ndarray.

    Raises
    ------
    TypeError
        If the operands' shapes/types are incompatible.
    """
    if isinstance(other, Qobj):
        if self.shape[1] == other.shape[0] and self.dims[1] == other.dims[0]:
            out = Qobj()
            out.data = self.data * other.data
            dims = [self.dims[0], other.dims[1]]
            out.dims = dims
            if not isinstance(dims[0][0], list) and not isinstance(dims[1][0], list):
                # flat (non-super) dims on both sides: collapse paired
                # singleton dimensions, keeping at least [1]
                r = range(len(dims[0]))
                mask = [dims[0][n] == dims[1][n] == 1 for n in r]
                out.dims = [
                    max([1], [dims[0][n] for n in r if not mask[n]]),
                    max([1], [dims[1][n] for n in r if not mask[n]]),
                ]
            else:
                out.dims = dims
            out.shape = [self.shape[0], other.shape[1]]
            # XXX: product superrep taken from the left operand; only
            # correct when both operands share a representation
            out.superrep = self.superrep
            out._isherm = out.isherm
            return out.tidyup() if qset.auto_tidyup else out
        elif np.prod(self.shape) == 1:
            # self is a 1x1 scalar object
            out = Qobj(other)
            out.data *= self.data[0, 0]
            return out.tidyup() if qset.auto_tidyup else out
        elif np.prod(other.shape) == 1:
            # bug fix: previously ``np.prod(other.shape)`` (missing
            # ``== 1``) was truthy for any non-empty shape, so
            # incompatible products were silently treated as scalar
            # multiplication instead of raising TypeError
            out = Qobj(self)
            out.data *= other.data[0, 0]
            return out.tidyup() if qset.auto_tidyup else out
        else:
            raise TypeError("Incompatible Qobj shapes")
    elif isinstance(other, (list, np.ndarray)):
        # if other is a list, do element-wise multiplication
        return np.array([self * item for item in other])
    elif _checkeseries(other) == "eseries":
        return other.__rmul__(self)
    elif isinstance(other, (int, float, complex, np.int64)):
        out = Qobj()
        out.data = self.data * other
        out.dims = self.dims
        out.shape = self.shape
        if isinstance(other, complex):
            # complex scaling can break hermiticity; recompute
            out._isherm = out.isherm
        else:
            out._isherm = self._isherm
        return out.tidyup() if qset.auto_tidyup else out
    else:
        raise TypeError("Incompatible object for multiplication")
|
def __mul__(self, other):
    """
    MULTIPLICATION with Qobj on LEFT [ ex. Qobj*4 ]

    Handles Qobj*Qobj (matrix product with dimension bookkeeping),
    Qobj*scalar, and element-wise Qobj*list/ndarray.

    Raises
    ------
    TypeError
        If the operands' shapes/types are incompatible.
    """
    if isinstance(other, Qobj):
        if self.shape[1] == other.shape[0] and self.dims[1] == other.dims[0]:
            out = Qobj()
            out.data = self.data * other.data
            dims = [self.dims[0], other.dims[1]]
            out.dims = dims
            # bug fix (qutip issue #96): also require dims[1][0] to be
            # flat; e.g. a bra times a superoperator yields a flat
            # dims[0] but nested dims[1], and treating the nested side
            # as flat corrupts the resulting dims
            if not isinstance(dims[0][0], list) and not isinstance(dims[1][0], list):
                # collapse paired singleton dimensions, keeping at
                # least [1]
                r = range(len(dims[0]))
                mask = [dims[0][n] == dims[1][n] == 1 for n in r]
                out.dims = [
                    max([1], [dims[0][n] for n in r if not mask[n]]),
                    max([1], [dims[1][n] for n in r if not mask[n]]),
                ]
            else:
                out.dims = dims
            out.shape = [self.shape[0], other.shape[1]]
            out.type = _typecheck(out)
            out._isherm = out.isherm
            return out.tidyup() if qset.auto_tidyup else out
        elif self.shape[0] == 1 and self.shape[1] == 1:
            # self is a 1x1 scalar object
            out = Qobj(other)
            out.data *= self.data[0, 0]
            return out.tidyup() if qset.auto_tidyup else out
        elif other.shape[0] == 1 and other.shape[1] == 1:
            # other is a 1x1 scalar object
            out = Qobj(self)
            out.data *= other.data[0, 0]
            return out.tidyup() if qset.auto_tidyup else out
        else:
            raise TypeError("Incompatible Qobj shapes")
    elif isinstance(other, (list, np.ndarray)):
        # if other is a list, do element-wise multiplication
        return np.array([self * item for item in other])
    elif _checkeseries(other) == "eseries":
        return other.__rmul__(self)
    elif isinstance(other, (int, float, complex, np.int64)):
        out = Qobj(type=self.type)
        out.data = self.data * other
        out.dims = self.dims
        out.shape = self.shape
        if isinstance(other, complex):
            # complex scaling can break hermiticity; recompute
            out._isherm = out.isherm
        else:
            out._isherm = self._isherm
        return out.tidyup() if qset.auto_tidyup else out
    else:
        raise TypeError("Incompatible object for multiplication")
|
https://github.com/qutip/qutip/issues/96
|
rho_psi = operator_to_vector(Qobj(np.diag(np.array([0.9, 0.1], dtype=complex))))
E_psi = rho_psi.dag()
S = to_super(sigmax())
(E_psi * S) * rho_psi
Traceback (most recent call last):
File "<ipython-input-22-90cbfac2a43e>", line 1, in <module>
(E_psi * S) * rho_psi
File "qutip/qobj.py", line 416, in __mul__
raise TypeError("Incompatible Qobj shapes")
TypeError: Incompatible Qobj shapes
E_psi * (S * rho_psi)
Quantum object: dims = [[1], [1]], shape = [1, 1], type = oper, isherm = True
Qobj data =
[[ 0.18]]
E_psi
Quantum object: dims = [[1], [[2], [2]]], shape = [1, 4], type = bra
Qobj data =
[[ 0.9 0. 0. 0.1]]
rho_psi
Quantum object: dims = [[[2], [2]], [1]], shape = [4, 1], type = operator-vector
Qobj data =
[[ 0.9]
[ 0. ]
[ 0. ]
[ 0.1]]
S
Quantum object: dims = [[[2], [2]], [[2], [2]]], shape = [4, 4], type = super, isherm = True
Qobj data =
[[ 0. 0. 0. 1.]
[ 0. 0. 1. 0.]
[ 0. 1. 0. 0.]
[ 1. 0. 0. 0.]]
|
TypeError
|
def __rmul__(self, other):
    """
    MULTIPLICATION with Qobj on RIGHT [ ex. 4*Qobj ]
    """
    if isinstance(other, Qobj):  # if both are quantum objects
        # NOTE(review): for ``other * self`` the compatibility check and
        # result metadata look transposed -- expected
        # ``other.shape[1] == self.shape[0]`` with dims/shape drawn as
        # ``[other.dims[0], self.dims[1]]`` / ``[other.shape[0],
        # self.shape[1]]``.  Python normally dispatches Qobj*Qobj to the
        # left operand's __mul__ first, so this branch is rarely
        # reached -- confirm before relying on it.
        if self.shape[1] == other.shape[0] and self.dims[1] == other.dims[0]:
            out = Qobj()
            out.data = other.data * self.data
            out.dims = self.dims
            out.shape = [self.shape[0], other.shape[1]]
            out._isherm = out.isherm
            return out.tidyup() if qset.auto_tidyup else out
        else:
            raise TypeError("Incompatible Qobj shapes")
    if isinstance(other, (list, np.ndarray)):
        # if other is a list, do element-wise multiplication
        return np.array([item * self for item in other])
    if _checkeseries(other) == "eseries":
        return other.__mul__(self)
    if isinstance(other, (int, float, complex, np.int64)):
        out = Qobj()
        out.data = other * self.data
        out.dims = self.dims
        out.shape = self.shape
        if isinstance(other, (int, float, np.int64)):
            # real scalar factors preserve hermiticity
            out._isherm = self._isherm
        else:
            # complex factor: recompute hermiticity
            out._isherm = out.isherm
        return out.tidyup() if qset.auto_tidyup else out
    else:
        raise TypeError("Incompatible object for multiplication")
|
def __rmul__(self, other):
    """
    MULTIPLICATION with Qobj on RIGHT [ ex. 4*Qobj ]
    """
    if isinstance(other, Qobj):  # if both are quantum objects
        # NOTE(review): for ``other * self`` the shape check and result
        # metadata look transposed (expected ``other.shape[1] ==
        # self.shape[0]`` with dims/shape from ``[other.dims[0],
        # self.dims[1]]``).  Python normally dispatches Qobj*Qobj to the
        # left operand's __mul__ first, so this branch is rarely
        # exercised -- confirm before relying on it.
        if self.shape[1] == other.shape[0] and self.dims[1] == other.dims[0]:
            out = Qobj()
            out.data = other.data * self.data
            out.dims = self.dims
            out.shape = [self.shape[0], other.shape[1]]
            out.type = _typecheck(out)
            out._isherm = out.isherm
            return out.tidyup() if qset.auto_tidyup else out
        else:
            raise TypeError("Incompatible Qobj shapes")
    if isinstance(other, (list, np.ndarray)):
        # if other is a list, do element-wise multiplication
        return np.array([item * self for item in other])
    if _checkeseries(other) == "eseries":
        return other.__mul__(self)
    if isinstance(other, (int, float, complex, np.int64)):
        out = Qobj(type=self.type)
        out.data = other * self.data
        out.dims = self.dims
        out.shape = self.shape
        if isinstance(other, (int, float, np.int64)):
            # real scalar factors preserve hermiticity
            out._isherm = self._isherm
        else:
            # complex factor: recompute hermiticity
            out._isherm = out.isherm
        return out.tidyup() if qset.auto_tidyup else out
    else:
        raise TypeError("Incompatible object for multiplication")
|
https://github.com/qutip/qutip/issues/96
|
rho_psi = operator_to_vector(Qobj(np.diag(np.array([0.9, 0.1], dtype=complex))))
E_psi = rho_psi.dag()
S = to_super(sigmax())
(E_psi * S) * rho_psi
Traceback (most recent call last):
File "<ipython-input-22-90cbfac2a43e>", line 1, in <module>
(E_psi * S) * rho_psi
File "qutip/qobj.py", line 416, in __mul__
raise TypeError("Incompatible Qobj shapes")
TypeError: Incompatible Qobj shapes
E_psi * (S * rho_psi)
Quantum object: dims = [[1], [1]], shape = [1, 1], type = oper, isherm = True
Qobj data =
[[ 0.18]]
E_psi
Quantum object: dims = [[1], [[2], [2]]], shape = [1, 4], type = bra
Qobj data =
[[ 0.9 0. 0. 0.1]]
rho_psi
Quantum object: dims = [[[2], [2]], [1]], shape = [4, 1], type = operator-vector
Qobj data =
[[ 0.9]
[ 0. ]
[ 0. ]
[ 0.1]]
S
Quantum object: dims = [[[2], [2]], [[2], [2]]], shape = [4, 4], type = super, isherm = True
Qobj data =
[[ 0. 0. 0. 1.]
[ 0. 0. 1. 0.]
[ 0. 1. 0. 0.]
[ 1. 0. 0. 0.]]
|
TypeError
|
def __div__(self, other):
    """
    DIVISION (by numbers only)
    """
    # Division by another quantum object is not defined.
    if isinstance(other, Qobj):
        raise TypeError(
            "Incompatible Qobj shapes [division with Qobj not implemented]"
        )
    if not isinstance(other, (int, float, complex, np.int64)):
        raise TypeError("Incompatible object for division")
    quotient = Qobj()
    quotient.data = self.data / other
    quotient.dims = self.dims
    quotient.shape = self.shape
    # Dividing by a real scalar preserves hermiticity; a complex
    # divisor requires recomputation.
    if isinstance(other, (int, float, np.int64)):
        quotient._isherm = self._isherm
    else:
        quotient._isherm = quotient.isherm
    if qset.auto_tidyup:
        return quotient.tidyup()
    return quotient
|
def __div__(self, other):
    """
    DIVISION (by numbers only)
    """
    # Division by another quantum object is not defined.
    if isinstance(other, Qobj):
        raise TypeError(
            "Incompatible Qobj shapes [division with Qobj not implemented]"
        )
    if not isinstance(other, (int, float, complex, np.int64)):
        raise TypeError("Incompatible object for division")
    quotient = Qobj(type=self.type)
    quotient.data = self.data / other
    quotient.dims = self.dims
    quotient.shape = self.shape
    # Dividing by a real scalar preserves hermiticity; a complex
    # divisor requires recomputation.
    if isinstance(other, (int, float, np.int64)):
        quotient._isherm = self._isherm
    else:
        quotient._isherm = quotient.isherm
    if qset.auto_tidyup:
        return quotient.tidyup()
    return quotient
|
https://github.com/qutip/qutip/issues/96
|
rho_psi = operator_to_vector(Qobj(np.diag(np.array([0.9, 0.1], dtype=complex))))
E_psi = rho_psi.dag()
S = to_super(sigmax())
(E_psi * S) * rho_psi
Traceback (most recent call last):
File "<ipython-input-22-90cbfac2a43e>", line 1, in <module>
(E_psi * S) * rho_psi
File "qutip/qobj.py", line 416, in __mul__
raise TypeError("Incompatible Qobj shapes")
TypeError: Incompatible Qobj shapes
E_psi * (S * rho_psi)
Quantum object: dims = [[1], [1]], shape = [1, 1], type = oper, isherm = True
Qobj data =
[[ 0.18]]
E_psi
Quantum object: dims = [[1], [[2], [2]]], shape = [1, 4], type = bra
Qobj data =
[[ 0.9 0. 0. 0.1]]
rho_psi
Quantum object: dims = [[[2], [2]], [1]], shape = [4, 1], type = operator-vector
Qobj data =
[[ 0.9]
[ 0. ]
[ 0. ]
[ 0.1]]
S
Quantum object: dims = [[[2], [2]], [[2], [2]]], shape = [4, 4], type = super, isherm = True
Qobj data =
[[ 0. 0. 0. 1.]
[ 0. 0. 1. 0.]
[ 0. 1. 0. 0.]
[ 1. 0. 0. 0.]]
|
TypeError
|
def __neg__(self):
    """
    NEGATION operation.
    """
    negated = Qobj()
    negated.data = -self.data
    # Negation leaves all metadata unchanged, including hermiticity
    # and the superoperator representation.
    negated.dims = self.dims
    negated.shape = self.shape
    negated.superrep = self.superrep
    negated._isherm = self._isherm
    if qset.auto_tidyup:
        return negated.tidyup()
    return negated
|
def __neg__(self):
    """
    NEGATION operation.
    """
    negated = Qobj()
    negated.data = -self.data
    # Negation leaves all metadata unchanged, including type and
    # hermiticity.
    negated.dims = self.dims
    negated.shape = self.shape
    negated.type = self.type
    negated._isherm = self._isherm
    if qset.auto_tidyup:
        return negated.tidyup()
    return negated
|
https://github.com/qutip/qutip/issues/96
|
rho_psi = operator_to_vector(Qobj(np.diag(np.array([0.9, 0.1], dtype=complex))))
E_psi = rho_psi.dag()
S = to_super(sigmax())
(E_psi * S) * rho_psi
Traceback (most recent call last):
File "<ipython-input-22-90cbfac2a43e>", line 1, in <module>
(E_psi * S) * rho_psi
File "qutip/qobj.py", line 416, in __mul__
raise TypeError("Incompatible Qobj shapes")
TypeError: Incompatible Qobj shapes
E_psi * (S * rho_psi)
Quantum object: dims = [[1], [1]], shape = [1, 1], type = oper, isherm = True
Qobj data =
[[ 0.18]]
E_psi
Quantum object: dims = [[1], [[2], [2]]], shape = [1, 4], type = bra
Qobj data =
[[ 0.9 0. 0. 0.1]]
rho_psi
Quantum object: dims = [[[2], [2]], [1]], shape = [4, 1], type = operator-vector
Qobj data =
[[ 0.9]
[ 0. ]
[ 0. ]
[ 0.1]]
S
Quantum object: dims = [[[2], [2]], [[2], [2]]], shape = [4, 4], type = super, isherm = True
Qobj data =
[[ 0. 0. 0. 1.]
[ 0. 0. 1. 0.]
[ 0. 1. 0. 0.]
[ 1. 0. 0. 0.]]
|
TypeError
|
def __str__(self):
    """Return the plain-text rendering of this quantum object."""
    # Metadata header first.
    header = "Quantum object: dims = {}, shape = {}, type = {}".format(
        self.dims, self.shape, self.type
    )
    if self.type in ["oper", "super"]:
        header += ", isherm = {}".format(self._isherm)
        if self.type == "super" and self.superrep != "super":
            header += ", superrep = {}".format(self.superrep)
    pieces = [header, "Qobj data ="]
    if self.shape[0] > 10000 or self.shape[1] > 10000:
        # For huge systems, converting to a dense matrix and then to a
        # string is pointless and likely to exhaust memory; show the
        # sparse data's own representation instead.
        pieces.append(str(self.data))
    elif all(np.imag(self.data.data) == 0):
        # Purely real stored data: drop the imaginary parts.
        pieces.append(str(np.real(self.full())))
    else:
        pieces.append(str(self.full()))
    return "\n".join(pieces)
|
def __str__(self):
    """Return the plain-text rendering of this quantum object."""
    out = "Quantum object: " + "dims = " + str(self.dims)
    out += ", shape = " + str(self.shape)
    out += ", type = " + self.type
    if self.type == "oper" or self.type == "super":
        # Operators and superoperators also report hermiticity (and a
        # non-default superoperator representation).
        out += ", isherm = " + str(self._isherm)
        if self.type == "super" and self.superrep != "super":
            out += ", superrep = " + str(self.superrep)
    out += "\nQobj data =\n"
    if self.shape[0] > 10000 or self.shape[1] > 10000:
        # For huge systems, converting to a dense matrix and then to a
        # string is pointless and likely to exhaust memory; show the
        # sparse data's own representation instead.
        out += str(self.data)
    elif all(np.imag(self.data.data) == 0):
        # Purely real stored data: drop the imaginary parts.
        out += str(np.real(self.full()))
    else:
        out += str(self.full())
    return out
|
https://github.com/qutip/qutip/issues/96
|
rho_psi = operator_to_vector(Qobj(np.diag(np.array([0.9, 0.1], dtype=complex))))
E_psi = rho_psi.dag()
S = to_super(sigmax())
(E_psi * S) * rho_psi
Traceback (most recent call last):
File "<ipython-input-22-90cbfac2a43e>", line 1, in <module>
(E_psi * S) * rho_psi
File "qutip/qobj.py", line 416, in __mul__
raise TypeError("Incompatible Qobj shapes")
TypeError: Incompatible Qobj shapes
E_psi * (S * rho_psi)
Quantum object: dims = [[1], [1]], shape = [1, 1], type = oper, isherm = True
Qobj data =
[[ 0.18]]
E_psi
Quantum object: dims = [[1], [[2], [2]]], shape = [1, 4], type = bra
Qobj data =
[[ 0.9 0. 0. 0.1]]
rho_psi
Quantum object: dims = [[[2], [2]], [1]], shape = [4, 1], type = operator-vector
Qobj data =
[[ 0.9]
[ 0. ]
[ 0. ]
[ 0.1]]
S
Quantum object: dims = [[[2], [2]], [[2], [2]]], shape = [4, 4], type = super, isherm = True
Qobj data =
[[ 0. 0. 0. 1.]
[ 0. 0. 1. 0.]
[ 0. 1. 0. 0.]
[ 1. 0. 0. 0.]]
|
TypeError
|
def _repr_latex_(self):
    """
    Generate a LaTeX representation of the Qobj instance. Can be used for
    formatted output in ipython notebook.

    Large matrices are truncated to a 5x5 corner view with
    cdots/vdots/ddots fillers; vectors get analogous 1-D truncation.
    """
    s = r"$\text{"
    if self.type in ["oper", "super"]:
        s += (
            "Quantum object: "
            + "dims = "
            + str(self.dims)
            + ", shape = "
            + str(self.shape)
            + ", type = "
            + self.type
            + ", isherm = "
            + str(self._isherm)
            + (
                ", superrep = {0.superrep}".format(self)
                if self.type == "super" and self.superrep != "super"
                else ""
            )
        )
    else:
        s += (
            "Quantum object: "
            + "dims = "
            + str(self.dims)
            + ", shape = "
            + str(self.shape)
            + ", type = "
            + self.type
        )
    s += r"}\\[1em]"
    M, N = self.data.shape
    s += r"\begin{pmatrix}"
    # Render a single real number in LaTeX-friendly form (scientific
    # notation for very large/small magnitudes).
    def _format_float(value):
        if value == 0.0:
            return "0.0"
        elif abs(value) > 1000.0 or abs(value) < 0.001:
            return ("%.3e" % value).replace("e", r"\times10^{") + "}"
        elif abs(value - int(value)) < 0.001:
            return "%.1f" % value
        else:
            return "%.3f" % value
    # Render one matrix element (or a filler string such as \vdots)
    # prefixed with the column separator; ``s`` here deliberately
    # shadows the outer accumulator.
    def _format_element(m, n, d):
        s = " & " if n > 0 else ""
        if type(d) == str:
            return s + d
        else:
            if abs(np.imag(d)) < 1e-12:
                return s + _format_float(np.real(d))
            elif abs(np.real(d)) < 1e-12:
                return s + _format_float(np.imag(d)) + "j"
            else:
                s_re = _format_float(np.real(d))
                s_im = _format_float(np.imag(d))
                if np.imag(d) > 0.0:
                    return s + "(" + s_re + "+" + s_im + "j)"
                else:
                    return s + "(" + s_re + s_im + "j)"
    if M > 10 and N > 10:
        # truncated matrix output
        for m in range(5):
            for n in range(5):
                s += _format_element(m, n, self.data[m, n])
            s += r" & \cdots"
            for n in range(N - 5, N):
                s += _format_element(m, n, self.data[m, n])
            s += r"\\"
        # middle filler row of \vdots / \ddots
        for n in range(5):
            s += _format_element(m, n, r"\vdots")
        s += r" & \ddots"
        for n in range(N - 5, N):
            s += _format_element(m, n, r"\vdots")
        s += r"\\"
        for m in range(M - 5, M):
            for n in range(5):
                s += _format_element(m, n, self.data[m, n])
            s += r" & \cdots"
            for n in range(N - 5, N):
                s += _format_element(m, n, self.data[m, n])
            s += r"\\"
    elif M > 10 and N == 1:
        # truncated column vector output
        for m in range(5):
            s += _format_element(m, 0, self.data[m, 0])
            s += r"\\"
        s += _format_element(m, 0, r"\vdots")
        s += r"\\"
        for m in range(M - 5, M):
            s += _format_element(m, 0, self.data[m, 0])
            s += r"\\"
    elif M == 1 and N > 10:
        # truncated row vector output
        for n in range(5):
            s += _format_element(0, n, self.data[0, n])
        s += r" & \cdots"
        for n in range(N - 5, N):
            s += _format_element(0, n, self.data[0, n])
        s += r"\\"
    else:
        # full output
        for m in range(M):
            for n in range(N):
                s += _format_element(m, n, self.data[m, n])
            s += r"\\"
    s += r"\end{pmatrix}$"
    return s
|
def _repr_latex_(self):
    """
    Generate a LaTeX representation of the Qobj instance. Can be used for
    formatted output in ipython notebook.

    Large matrices are truncated to a 5x5 corner view with
    cdots/vdots/ddots fillers; vectors get analogous 1-D truncation.
    """
    s = r"$\text{"
    if self.type == "oper" or self.type == "super":
        s += (
            "Quantum object: "
            + "dims = "
            + str(self.dims)
            + ", shape = "
            + str(self.shape)
            + ", type = "
            + self.type
            + ", isherm = "
            + str(self._isherm)
            + (
                ", superrep = {0.superrep}".format(self)
                if self.type == "super" and self.superrep != "super"
                else ""
            )
        )
    else:
        s += (
            "Quantum object: "
            + "dims = "
            + str(self.dims)
            + ", shape = "
            + str(self.shape)
            + ", type = "
            + self.type
        )
    s += r"}\\[1em]"
    M, N = self.data.shape
    s += r"\begin{pmatrix}"
    # Render a single real number in LaTeX-friendly form (scientific
    # notation for very large/small magnitudes).
    def _format_float(value):
        if value == 0.0:
            return "0.0"
        elif abs(value) > 1000.0 or abs(value) < 0.001:
            return ("%.3e" % value).replace("e", r"\times10^{") + "}"
        elif abs(value - int(value)) < 0.001:
            return "%.1f" % value
        else:
            return "%.3f" % value
    # Render one matrix element (or a filler string such as \vdots)
    # prefixed with the column separator; ``s`` here deliberately
    # shadows the outer accumulator.
    def _format_element(m, n, d):
        s = " & " if n > 0 else ""
        if type(d) == str:
            return s + d
        else:
            if abs(np.imag(d)) < 1e-12:
                return s + _format_float(np.real(d))
            elif abs(np.real(d)) < 1e-12:
                return s + _format_float(np.imag(d)) + "j"
            else:
                s_re = _format_float(np.real(d))
                s_im = _format_float(np.imag(d))
                if np.imag(d) > 0.0:
                    return s + "(" + s_re + "+" + s_im + "j)"
                else:
                    return s + "(" + s_re + s_im + "j)"
    if M > 10 and N > 10:
        # truncated matrix output
        for m in range(5):
            for n in range(5):
                s += _format_element(m, n, self.data[m, n])
            s += r" & \cdots"
            for n in range(N - 5, N):
                s += _format_element(m, n, self.data[m, n])
            s += r"\\"
        # middle filler row of \vdots / \ddots
        for n in range(5):
            s += _format_element(m, n, r"\vdots")
        s += r" & \ddots"
        for n in range(N - 5, N):
            s += _format_element(m, n, r"\vdots")
        s += r"\\"
        for m in range(M - 5, M):
            for n in range(5):
                s += _format_element(m, n, self.data[m, n])
            s += r" & \cdots"
            for n in range(N - 5, N):
                s += _format_element(m, n, self.data[m, n])
            s += r"\\"
    elif M > 10 and N == 1:
        # truncated column vector output
        for m in range(5):
            s += _format_element(m, 0, self.data[m, 0])
            s += r"\\"
        s += _format_element(m, 0, r"\vdots")
        s += r"\\"
        for m in range(M - 5, M):
            s += _format_element(m, 0, self.data[m, 0])
            s += r"\\"
    elif M == 1 and N > 10:
        # truncated row vector output
        for n in range(5):
            s += _format_element(0, n, self.data[0, n])
        s += r" & \cdots"
        for n in range(N - 5, N):
            s += _format_element(0, n, self.data[0, n])
        s += r"\\"
    else:
        # full output
        for m in range(M):
            for n in range(N):
                s += _format_element(m, n, self.data[m, n])
            s += r"\\"
    s += r"\end{pmatrix}$"
    return s
https://github.com/qutip/qutip/issues/96
|
rho_psi = operator_to_vector(Qobj(np.diag(np.array([0.9, 0.1], dtype=complex))))
E_psi = rho_psi.dag()
S = to_super(sigmax())
(E_psi * S) * rho_psi
Traceback (most recent call last):
File "<ipython-input-22-90cbfac2a43e>", line 1, in <module>
(E_psi * S) * rho_psi
File "qutip/qobj.py", line 416, in __mul__
raise TypeError("Incompatible Qobj shapes")
TypeError: Incompatible Qobj shapes
E_psi * (S * rho_psi)
Quantum object: dims = [[1], [1]], shape = [1, 1], type = oper, isherm = True
Qobj data =
[[ 0.18]]
E_psi
Quantum object: dims = [[1], [[2], [2]]], shape = [1, 4], type = bra
Qobj data =
[[ 0.9 0. 0. 0.1]]
rho_psi
Quantum object: dims = [[[2], [2]], [1]], shape = [4, 1], type = operator-vector
Qobj data =
[[ 0.9]
[ 0. ]
[ 0. ]
[ 0.1]]
S
Quantum object: dims = [[[2], [2]], [[2], [2]]], shape = [4, 4], type = super, isherm = True
Qobj data =
[[ 0. 0. 0. 1.]
[ 0. 0. 1. 0.]
[ 0. 1. 0. 0.]
[ 1. 0. 0. 0.]]
|
TypeError
|
def dag(self):
    """Adjoint operator of quantum object."""
    adjoint = Qobj()
    # Conjugate-transpose the sparse data (conj then transpose is
    # equivalent to transpose then conj).
    adjoint.data = self.data.conj().T.tocsr()
    # Input/output dimensions swap under the adjoint.
    adjoint.dims = [self.dims[1], self.dims[0]]
    adjoint.shape = [self.shape[1], self.shape[0]]
    adjoint._isherm = self._isherm
    return adjoint
|
def dag(self):
    """Adjoint operator of quantum object."""
    adjoint = Qobj()
    # Conjugate-transpose the sparse data (conj then transpose is
    # equivalent to transpose then conj).
    adjoint.data = self.data.conj().T.tocsr()
    # Input/output dimensions swap under the adjoint.
    adjoint.dims = [self.dims[1], self.dims[0]]
    adjoint.shape = [self.shape[1], self.shape[0]]
    adjoint._isherm = self._isherm
    adjoint.type = _typecheck(adjoint)
    return adjoint
|
https://github.com/qutip/qutip/issues/96
|
rho_psi = operator_to_vector(Qobj(np.diag(np.array([0.9, 0.1], dtype=complex))))
E_psi = rho_psi.dag()
S = to_super(sigmax())
(E_psi * S) * rho_psi
Traceback (most recent call last):
File "<ipython-input-22-90cbfac2a43e>", line 1, in <module>
(E_psi * S) * rho_psi
File "qutip/qobj.py", line 416, in __mul__
raise TypeError("Incompatible Qobj shapes")
TypeError: Incompatible Qobj shapes
E_psi * (S * rho_psi)
Quantum object: dims = [[1], [1]], shape = [1, 1], type = oper, isherm = True
Qobj data =
[[ 0.18]]
E_psi
Quantum object: dims = [[1], [[2], [2]]], shape = [1, 4], type = bra
Qobj data =
[[ 0.9 0. 0. 0.1]]
rho_psi
Quantum object: dims = [[[2], [2]], [1]], shape = [4, 1], type = operator-vector
Qobj data =
[[ 0.9]
[ 0. ]
[ 0. ]
[ 0.1]]
S
Quantum object: dims = [[[2], [2]], [[2], [2]]], shape = [4, 4], type = super, isherm = True
Qobj data =
[[ 0. 0. 0. 1.]
[ 0. 0. 1. 0.]
[ 0. 1. 0. 0.]
[ 1. 0. 0. 0.]]
|
TypeError
|
def conj(self):
    """Element-wise complex conjugate of the quantum object.

    Returns
    -------
    out : Qobj
        Conjugated quantum object with the same dims and shape.
    """
    out = Qobj()
    out.data = self.data.conj()
    # bug fix: conjugation does not transpose, so dims/shape must not
    # be swapped (the old [dims[1], dims[0]] metadata was inconsistent
    # with the unchanged data for non-square objects)
    out.dims = [self.dims[0], self.dims[1]]
    out.shape = [self.shape[0], self.shape[1]]
    return out
|
def conj(self):
    """Element-wise complex conjugate of the quantum object.

    Returns
    -------
    out : Qobj
        Conjugated quantum object with the same dims and shape.
    """
    out = Qobj(type=self.type)
    out.data = self.data.conj()
    # bug fix: conjugation does not transpose, so dims/shape must not
    # be swapped (the old [dims[1], dims[0]] metadata was inconsistent
    # with the unchanged data for non-square objects)
    out.dims = [self.dims[0], self.dims[1]]
    out.shape = [self.shape[0], self.shape[1]]
    return out
|
https://github.com/qutip/qutip/issues/96
|
rho_psi = operator_to_vector(Qobj(np.diag(np.array([0.9, 0.1], dtype=complex))))
E_psi = rho_psi.dag()
S = to_super(sigmax())
(E_psi * S) * rho_psi
Traceback (most recent call last):
File "<ipython-input-22-90cbfac2a43e>", line 1, in <module>
(E_psi * S) * rho_psi
File "qutip/qobj.py", line 416, in __mul__
raise TypeError("Incompatible Qobj shapes")
TypeError: Incompatible Qobj shapes
E_psi * (S * rho_psi)
Quantum object: dims = [[1], [1]], shape = [1, 1], type = oper, isherm = True
Qobj data =
[[ 0.18]]
E_psi
Quantum object: dims = [[1], [[2], [2]]], shape = [1, 4], type = bra
Qobj data =
[[ 0.9 0. 0. 0.1]]
rho_psi
Quantum object: dims = [[[2], [2]], [1]], shape = [4, 1], type = operator-vector
Qobj data =
[[ 0.9]
[ 0. ]
[ 0. ]
[ 0.1]]
S
Quantum object: dims = [[[2], [2]], [[2], [2]]], shape = [4, 4], type = super, isherm = True
Qobj data =
[[ 0. 0. 0. 1.]
[ 0. 0. 1. 0.]
[ 0. 1. 0. 0.]
[ 1. 0. 0. 0.]]
|
TypeError
|
def norm(self, norm=None, sparse=False, tol=0, maxiter=100000):
"""Norm of a quantum object.
Default norm is L2-norm for kets and trace-norm for operators.
Other ket and operator norms may be specified using the
`ket_norm` and `oper_norm` arguments.
Parameters
----------
norm : str
Which norm to use for ket/bra vectors: L2 'l2', max norm 'max',
or for operators: trace 'tr', Frobius 'fro', one 'one', or max 'max'.
sparse : bool
Use sparse eigenvalue solver for trace norm. Other norms are not
affected by this parameter.
tol : float
Tolerance for sparse solver (if used) for trace norm. The sparse
solver may not converge if the tolerance is set too low.
maxiter : int
Maximum number of iterations performed by sparse solver (if used)
for trace norm.
Returns
-------
norm : float
The requested norm of the operator or state quantum object.
Notes
-----
The sparse eigensolver is much slower than the dense version.
Use sparse only if memory requirements demand it.
"""
if self.type in ["oper", "super"]:
if norm is None or norm == "tr":
vals = sp_eigs(self, vecs=False, sparse=sparse, tol=tol, maxiter=maxiter)
return np.sum(sqrt(abs(vals) ** 2))
elif norm == "fro":
return _sp_fro_norm(self)
elif norm == "one":
return _sp_one_norm(self)
elif norm == "max":
return _sp_max_norm(self)
else:
raise ValueError("Operator norm must be 'tr', 'fro', 'one', or 'max'.")
else:
if norm == None:
norm = "l2"
if norm == "l2":
return _sp_L2_norm(self)
elif norm == "max":
return _sp_max_norm(self)
else:
raise ValueError("Ket norm must be 'l2', or 'max'.")
|
def norm(self, norm=None, sparse=False, tol=0, maxiter=100000):
"""Norm of a quantum object.
Default norm is L2-norm for kets and trace-norm for operators.
Other ket and operator norms may be specified using the
`ket_norm` and `oper_norm` arguments.
Parameters
----------
norm : str
Which norm to use for ket/bra vectors: L2 'l2', max norm 'max',
or for operators: trace 'tr', Frobius 'fro', one 'one', or max 'max'.
sparse : bool
Use sparse eigenvalue solver for trace norm. Other norms are not
affected by this parameter.
tol : float
Tolerance for sparse solver (if used) for trace norm. The sparse
solver may not converge if the tolerance is set too low.
maxiter : int
Maximum number of iterations performed by sparse solver (if used)
for trace norm.
Returns
-------
norm : float
The requested norm of the operator or state quantum object.
Notes
-----
The sparse eigensolver is much slower than the dense version.
Use sparse only if memory requirements demand it.
"""
if self.type == "oper" or self.type == "super":
if norm is None or norm == "tr":
vals = sp_eigs(self, vecs=False, sparse=sparse, tol=tol, maxiter=maxiter)
return np.sum(sqrt(abs(vals) ** 2))
elif norm == "fro":
return _sp_fro_norm(self)
elif norm == "one":
return _sp_one_norm(self)
elif norm == "max":
return _sp_max_norm(self)
else:
raise ValueError("Operator norm must be 'tr', 'fro', 'one', or 'max'.")
else:
if norm == None:
norm = "l2"
if norm == "l2":
return _sp_L2_norm(self)
elif norm == "max":
return _sp_max_norm(self)
else:
raise ValueError("Ket norm must be 'l2', or 'max'.")
|
https://github.com/qutip/qutip/issues/96
|
rho_psi = operator_to_vector(Qobj(np.diag(np.array([0.9, 0.1], dtype=complex))))>>> E_psi = rho_psi.dag()
S = to_super(sigmax())
(E_psi * S) * rho_psi
Traceback (most recent call last):
File "<ipython-input-22-90cbfac2a43e>", line 1, in <module>
(E_psi * S) * rho_psi
File "qutip/qobj.py", line 416, in __mul__
raise TypeError("Incompatible Qobj shapes")
TypeError: Incompatible Qobj shapes
E_psi * (S * rho_psi)
Quantum object: dims = [[1], [1]], shape = [1, 1], type = oper, isherm = True
Qobj data =
[[ 0.18]]
E_psi
Quantum object: dims = [[1], [[2], [2]]], shape = [1, 4], type = bra
Qobj data =
[[ 0.9 0. 0. 0.1]]
rho_psi
Quantum object: dims = [[[2], [2]], [1]], shape = [4, 1], type = operator-vector
Qobj data =
[[ 0.9]
[ 0. ]
[ 0. ]
[ 0.1]]
S
Quantum object: dims = [[[2], [2]], [[2], [2]]], shape = [4, 4], type = super, isherm = True
Qobj data =
[[ 0. 0. 0. 1.]
[ 0. 0. 1. 0.]
[ 0. 1. 0. 0.]
[ 1. 0. 0. 0.]]
|
TypeError
|
def transform(self, inpt, inverse=False):
"""Basis transform defined by input array.
Input array can be a ``matrix`` defining the transformation,
or a ``list`` of kets that defines the new basis.
Parameters
----------
inpt : array_like
A ``matrix`` or ``list`` of kets defining the transformation.
inverse : bool
Whether to return inverse transformation.
Returns
-------
oper : qobj
Operator in new basis.
Notes
-----
This function is still in development.
"""
if isinstance(inpt, list) or isinstance(inpt, np.ndarray):
if len(inpt) != max(self.shape):
raise TypeError("Invalid size of ket list for basis transformation")
S = np.matrix(np.hstack([psi.full() for psi in inpt])).H
elif isinstance(inpt, np.ndarray):
S = np.matrix(inpt)
else:
raise TypeError("Invalid operand for basis transformation")
# normalize S just in case the supplied basis states aren't normalized
# S = S/la.norm(S)
out = Qobj(dims=self.dims, shape=self.shape)
out._isherm = self._isherm
out.superrep = self.superrep
# transform data
if inverse:
if isket(self):
out.data = S.H * self.data
elif isbra(self):
out.data = self.data * S
else:
out.data = S.H * self.data * S
else:
if isket(self):
out.data = S * self.data
elif isbra(self):
out.data = self.data * S.H
else:
out.data = S * self.data * S.H
# force sparse
out.data = sp.csr_matrix(out.data, dtype=complex)
return out
|
def transform(self, inpt, inverse=False):
"""Basis transform defined by input array.
Input array can be a ``matrix`` defining the transformation,
or a ``list`` of kets that defines the new basis.
Parameters
----------
inpt : array_like
A ``matrix`` or ``list`` of kets defining the transformation.
inverse : bool
Whether to return inverse transformation.
Returns
-------
oper : qobj
Operator in new basis.
Notes
-----
This function is still in development.
"""
if isinstance(inpt, list) or isinstance(inpt, np.ndarray):
if len(inpt) != max(self.shape):
raise TypeError("Invalid size of ket list for basis transformation")
S = np.matrix(np.hstack([psi.full() for psi in inpt])).H
elif isinstance(inpt, np.ndarray):
S = np.matrix(inpt)
else:
raise TypeError("Invalid operand for basis transformation")
# normalize S just in case the supplied basis states aren't normalized
# S = S/la.norm(S)
out = Qobj(type=self.type, dims=self.dims, shape=self.shape)
out._isherm = self._isherm
out.type = self.type
# transform data
if inverse:
if isket(self):
out.data = S.H * self.data
elif isbra(self):
out.data = self.data * S
else:
out.data = S.H * self.data * S
else:
if isket(self):
out.data = S * self.data
elif isbra(self):
out.data = self.data * S.H
else:
out.data = S * self.data * S.H
# force sparse
out.data = sp.csr_matrix(out.data, dtype=complex)
return out
|
https://github.com/qutip/qutip/issues/96
|
rho_psi = operator_to_vector(Qobj(np.diag(np.array([0.9, 0.1], dtype=complex))))>>> E_psi = rho_psi.dag()
S = to_super(sigmax())
(E_psi * S) * rho_psi
Traceback (most recent call last):
File "<ipython-input-22-90cbfac2a43e>", line 1, in <module>
(E_psi * S) * rho_psi
File "qutip/qobj.py", line 416, in __mul__
raise TypeError("Incompatible Qobj shapes")
TypeError: Incompatible Qobj shapes
E_psi * (S * rho_psi)
Quantum object: dims = [[1], [1]], shape = [1, 1], type = oper, isherm = True
Qobj data =
[[ 0.18]]
E_psi
Quantum object: dims = [[1], [[2], [2]]], shape = [1, 4], type = bra
Qobj data =
[[ 0.9 0. 0. 0.1]]
rho_psi
Quantum object: dims = [[[2], [2]], [1]], shape = [4, 1], type = operator-vector
Qobj data =
[[ 0.9]
[ 0. ]
[ 0. ]
[ 0.1]]
S
Quantum object: dims = [[[2], [2]], [[2], [2]]], shape = [4, 4], type = super, isherm = True
Qobj data =
[[ 0. 0. 0. 1.]
[ 0. 0. 1. 0.]
[ 0. 1. 0. 0.]
[ 1. 0. 0. 0.]]
|
TypeError
|
def trans(self):
"""Transposed operator.
Returns
-------
oper : qobj
Transpose of input operator.
"""
out = Qobj()
out.data = self.data.T.tocsr()
out.dims = [self.dims[1], self.dims[0]]
out.shape = [self.shape[1], self.shape[0]]
return out
|
def trans(self):
"""Transposed operator.
Returns
-------
oper : qobj
Transpose of input operator.
"""
out = Qobj()
out.data = self.data.T.tocsr()
out.dims = [self.dims[1], self.dims[0]]
out.shape = [self.shape[1], self.shape[0]]
out.type = _typecheck(out)
return out
|
https://github.com/qutip/qutip/issues/96
|
rho_psi = operator_to_vector(Qobj(np.diag(np.array([0.9, 0.1], dtype=complex))))>>> E_psi = rho_psi.dag()
S = to_super(sigmax())
(E_psi * S) * rho_psi
Traceback (most recent call last):
File "<ipython-input-22-90cbfac2a43e>", line 1, in <module>
(E_psi * S) * rho_psi
File "qutip/qobj.py", line 416, in __mul__
raise TypeError("Incompatible Qobj shapes")
TypeError: Incompatible Qobj shapes
E_psi * (S * rho_psi)
Quantum object: dims = [[1], [1]], shape = [1, 1], type = oper, isherm = True
Qobj data =
[[ 0.18]]
E_psi
Quantum object: dims = [[1], [[2], [2]]], shape = [1, 4], type = bra
Qobj data =
[[ 0.9 0. 0. 0.1]]
rho_psi
Quantum object: dims = [[[2], [2]], [1]], shape = [4, 1], type = operator-vector
Qobj data =
[[ 0.9]
[ 0. ]
[ 0. ]
[ 0.1]]
S
Quantum object: dims = [[[2], [2]], [[2], [2]]], shape = [4, 4], type = super, isherm = True
Qobj data =
[[ 0. 0. 0. 1.]
[ 0. 0. 1. 0.]
[ 0. 1. 0. 0.]
[ 1. 0. 0. 0.]]
|
TypeError
|
def iscp(self):
# FIXME: this needs to be cached in the same ways as isherm.
if self.type in ["super", "oper"]:
try:
q_oper = sr.to_choi(self)
eigs = q_oper.eigenenergies()
return all(eigs >= 0)
except:
return False
else:
return False
|
def iscp(self):
# FIXME: this needs to be cached in the same ways as isherm.
if self.type == "super" or self.type == "oper":
try:
q_oper = sr.to_choi(self)
eigs = q_oper.eigenenergies()
return all(eigs >= 0)
except:
return False
else:
return False
|
https://github.com/qutip/qutip/issues/96
|
rho_psi = operator_to_vector(Qobj(np.diag(np.array([0.9, 0.1], dtype=complex))))>>> E_psi = rho_psi.dag()
S = to_super(sigmax())
(E_psi * S) * rho_psi
Traceback (most recent call last):
File "<ipython-input-22-90cbfac2a43e>", line 1, in <module>
(E_psi * S) * rho_psi
File "qutip/qobj.py", line 416, in __mul__
raise TypeError("Incompatible Qobj shapes")
TypeError: Incompatible Qobj shapes
E_psi * (S * rho_psi)
Quantum object: dims = [[1], [1]], shape = [1, 1], type = oper, isherm = True
Qobj data =
[[ 0.18]]
E_psi
Quantum object: dims = [[1], [[2], [2]]], shape = [1, 4], type = bra
Qobj data =
[[ 0.9 0. 0. 0.1]]
rho_psi
Quantum object: dims = [[[2], [2]], [1]], shape = [4, 1], type = operator-vector
Qobj data =
[[ 0.9]
[ 0. ]
[ 0. ]
[ 0.1]]
S
Quantum object: dims = [[[2], [2]], [[2], [2]]], shape = [4, 4], type = super, isherm = True
Qobj data =
[[ 0. 0. 0. 1.]
[ 0. 0. 1. 0.]
[ 0. 1. 0. 0.]
[ 1. 0. 0. 0.]]
|
TypeError
|
def istp(self):
if self.type in ["super", "oper"]:
try:
q_oper = sr.to_choi(self)
# We use the condition from John Watrous' lecture notes,
# Tr_1(J(Phi)) = identity_2.
tr_oper = ptrace(q_oper, (0,))
ident = ops.identity(tr_oper.shape[0])
return isequal(tr_oper, ident)
except:
return False
else:
return False
|
def istp(self):
if self.type == "super" or self.type == "oper":
try:
q_oper = sr.to_choi(self)
# We use the condition from John Watrous' lecture notes,
# Tr_1(J(Phi)) = identity_2.
tr_oper = ptrace(q_oper, (0,))
ident = ops.identity(tr_oper.shape[0])
return isequal(tr_oper, ident)
except:
return False
else:
return False
|
https://github.com/qutip/qutip/issues/96
|
rho_psi = operator_to_vector(Qobj(np.diag(np.array([0.9, 0.1], dtype=complex))))>>> E_psi = rho_psi.dag()
S = to_super(sigmax())
(E_psi * S) * rho_psi
Traceback (most recent call last):
File "<ipython-input-22-90cbfac2a43e>", line 1, in <module>
(E_psi * S) * rho_psi
File "qutip/qobj.py", line 416, in __mul__
raise TypeError("Incompatible Qobj shapes")
TypeError: Incompatible Qobj shapes
E_psi * (S * rho_psi)
Quantum object: dims = [[1], [1]], shape = [1, 1], type = oper, isherm = True
Qobj data =
[[ 0.18]]
E_psi
Quantum object: dims = [[1], [[2], [2]]], shape = [1, 4], type = bra
Qobj data =
[[ 0.9 0. 0. 0.1]]
rho_psi
Quantum object: dims = [[[2], [2]], [1]], shape = [4, 1], type = operator-vector
Qobj data =
[[ 0.9]
[ 0. ]
[ 0. ]
[ 0.1]]
S
Quantum object: dims = [[[2], [2]], [[2], [2]]], shape = [4, 4], type = super, isherm = True
Qobj data =
[[ 0. 0. 0. 1.]
[ 0. 0. 1. 0.]
[ 0. 1. 0. 0.]
[ 1. 0. 0. 0.]]
|
TypeError
|
def iscptp(self):
if self.type in ["super", "oper"]:
q_oper = sr.to_choi(self)
return q_oper.iscp and q_oper.istp
else:
return False
|
def iscptp(self):
if self.type == "super" or self.type == "oper":
q_oper = sr.to_choi(self)
return q_oper.iscp and q_oper.istp
else:
return False
|
https://github.com/qutip/qutip/issues/96
|
rho_psi = operator_to_vector(Qobj(np.diag(np.array([0.9, 0.1], dtype=complex))))>>> E_psi = rho_psi.dag()
S = to_super(sigmax())
(E_psi * S) * rho_psi
Traceback (most recent call last):
File "<ipython-input-22-90cbfac2a43e>", line 1, in <module>
(E_psi * S) * rho_psi
File "qutip/qobj.py", line 416, in __mul__
raise TypeError("Incompatible Qobj shapes")
TypeError: Incompatible Qobj shapes
E_psi * (S * rho_psi)
Quantum object: dims = [[1], [1]], shape = [1, 1], type = oper, isherm = True
Qobj data =
[[ 0.18]]
E_psi
Quantum object: dims = [[1], [[2], [2]]], shape = [1, 4], type = bra
Qobj data =
[[ 0.9 0. 0. 0.1]]
rho_psi
Quantum object: dims = [[[2], [2]], [1]], shape = [4, 1], type = operator-vector
Qobj data =
[[ 0.9]
[ 0. ]
[ 0. ]
[ 0.1]]
S
Quantum object: dims = [[[2], [2]], [[2], [2]]], shape = [4, 4], type = super, isherm = True
Qobj data =
[[ 0. 0. 0. 1.]
[ 0. 0. 1. 0.]
[ 0. 1. 0. 0.]
[ 1. 0. 0. 0.]]
|
TypeError
|
def isbra(self):
return (
np.prod(self.dims[0]) == 1
and isinstance(self.dims[1], list)
and isinstance(self.dims[1][0], int)
)
|
def isbra(self):
return isinstance(self.dims[1], list) and np.prod(self.dims[0]) == 1
|
https://github.com/qutip/qutip/issues/96
|
rho_psi = operator_to_vector(Qobj(np.diag(np.array([0.9, 0.1], dtype=complex))))>>> E_psi = rho_psi.dag()
S = to_super(sigmax())
(E_psi * S) * rho_psi
Traceback (most recent call last):
File "<ipython-input-22-90cbfac2a43e>", line 1, in <module>
(E_psi * S) * rho_psi
File "qutip/qobj.py", line 416, in __mul__
raise TypeError("Incompatible Qobj shapes")
TypeError: Incompatible Qobj shapes
E_psi * (S * rho_psi)
Quantum object: dims = [[1], [1]], shape = [1, 1], type = oper, isherm = True
Qobj data =
[[ 0.18]]
E_psi
Quantum object: dims = [[1], [[2], [2]]], shape = [1, 4], type = bra
Qobj data =
[[ 0.9 0. 0. 0.1]]
rho_psi
Quantum object: dims = [[[2], [2]], [1]], shape = [4, 1], type = operator-vector
Qobj data =
[[ 0.9]
[ 0. ]
[ 0. ]
[ 0.1]]
S
Quantum object: dims = [[[2], [2]], [[2], [2]]], shape = [4, 4], type = super, isherm = True
Qobj data =
[[ 0. 0. 0. 1.]
[ 0. 0. 1. 0.]
[ 0. 1. 0. 0.]
[ 1. 0. 0. 0.]]
|
TypeError
|
def isket(self):
return (
np.prod(self.dims[1]) == 1
and isinstance(self.dims[0], list)
and isinstance(self.dims[0][0], int)
)
|
def isket(self):
return isinstance(self.dims[0], list) and np.prod(self.dims[1]) == 1
|
https://github.com/qutip/qutip/issues/96
|
rho_psi = operator_to_vector(Qobj(np.diag(np.array([0.9, 0.1], dtype=complex))))>>> E_psi = rho_psi.dag()
S = to_super(sigmax())
(E_psi * S) * rho_psi
Traceback (most recent call last):
File "<ipython-input-22-90cbfac2a43e>", line 1, in <module>
(E_psi * S) * rho_psi
File "qutip/qobj.py", line 416, in __mul__
raise TypeError("Incompatible Qobj shapes")
TypeError: Incompatible Qobj shapes
E_psi * (S * rho_psi)
Quantum object: dims = [[1], [1]], shape = [1, 1], type = oper, isherm = True
Qobj data =
[[ 0.18]]
E_psi
Quantum object: dims = [[1], [[2], [2]]], shape = [1, 4], type = bra
Qobj data =
[[ 0.9 0. 0. 0.1]]
rho_psi
Quantum object: dims = [[[2], [2]], [1]], shape = [4, 1], type = operator-vector
Qobj data =
[[ 0.9]
[ 0. ]
[ 0. ]
[ 0.1]]
S
Quantum object: dims = [[[2], [2]], [[2], [2]]], shape = [4, 4], type = super, isherm = True
Qobj data =
[[ 0. 0. 0. 1.]
[ 0. 0. 1. 0.]
[ 0. 1. 0. 0.]
[ 1. 0. 0. 0.]]
|
TypeError
|
def __init__(
self,
geometry,
settings,
chain_file=None,
prev_results=None,
diff_burnable_mats=False,
fission_q=None,
dilute_initial=1.0e3,
):
super().__init__(chain_file, fission_q, dilute_initial, prev_results)
self.round_number = False
self.prev_res = None
self.settings = settings
self.geometry = geometry
self.diff_burnable_mats = diff_burnable_mats
# Differentiate burnable materials with multiple instances
if self.diff_burnable_mats:
self._differentiate_burnable_mats()
# Clear out OpenMC, create task lists, distribute
openmc.reset_auto_ids()
self.burnable_mats, volume, nuclides = self._get_burnable_mats()
self.local_mats = _distribute(self.burnable_mats)
# Generate map from local materials => material index
self._mat_index_map = {lm: self.burnable_mats.index(lm) for lm in self.local_mats}
if self.prev_res is not None:
# Reload volumes into geometry
prev_results[-1].transfer_volumes(geometry)
# Store previous results in operator
# Distribute reaction rates according to those tracked
# on this process
if comm.size == 1:
self.prev_res = prev_results
else:
self.prev_res = ResultsList()
mat_indexes = _distribute(range(len(self.burnable_mats)))
for res_obj in prev_results:
new_res = res_obj.distribute(self.local_mats, mat_indexes)
self.prev_res.append(new_res)
# Determine which nuclides have incident neutron data
self.nuclides_with_data = self._get_nuclides_with_data()
# Select nuclides with data that are also in the chain
self._burnable_nucs = [
nuc.name for nuc in self.chain.nuclides if nuc.name in self.nuclides_with_data
]
# Extract number densities from the geometry / previous depletion run
self._extract_number(self.local_mats, volume, nuclides, self.prev_res)
# Create reaction rates array
self.reaction_rates = ReactionRates(
self.local_mats, self._burnable_nucs, self.chain.reactions
)
# Get classes to assist working with tallies
self._rate_helper = DirectReactionRateHelper(
self.reaction_rates.n_nuc, self.reaction_rates.n_react
)
self._energy_helper = ChainFissionHelper()
|
def __init__(
self,
geometry,
settings,
chain_file=None,
prev_results=None,
diff_burnable_mats=False,
fission_q=None,
dilute_initial=1.0e3,
):
super().__init__(chain_file, fission_q, dilute_initial, prev_results)
self.round_number = False
self.settings = settings
self.geometry = geometry
self.diff_burnable_mats = diff_burnable_mats
if self.prev_res is not None:
# Reload volumes into geometry
self.prev_results[-1].transfer_volumes(geometry)
# Differentiate burnable materials with multiple instances
if self.diff_burnable_mats:
self._differentiate_burnable_mats()
# Clear out OpenMC, create task lists, distribute
openmc.reset_auto_ids()
self.burnable_mats, volume, nuclides = self._get_burnable_mats()
self.local_mats = _distribute(self.burnable_mats)
# Generate map from local materials => material index
self._mat_index_map = {lm: self.burnable_mats.index(lm) for lm in self.local_mats}
# Determine which nuclides have incident neutron data
self.nuclides_with_data = self._get_nuclides_with_data()
# Select nuclides with data that are also in the chain
self._burnable_nucs = [
nuc.name for nuc in self.chain.nuclides if nuc.name in self.nuclides_with_data
]
# Extract number densities from the geometry / previous depletion run
self._extract_number(self.local_mats, volume, nuclides, self.prev_res)
# Create reaction rates array
self.reaction_rates = ReactionRates(
self.local_mats, self._burnable_nucs, self.chain.reactions
)
# Get classes to assist working with tallies
self._rate_helper = DirectReactionRateHelper(
self.reaction_rates.n_nuc, self.reaction_rates.n_react
)
self._energy_helper = ChainFissionHelper()
|
https://github.com/openmc-dev/openmc/issues/1275
|
Reading c_H_in_H2O from /home/drew/nndc_hdf5/c_H_in_H2O.h5
Maximum neutron transport energy: 20000000.000000 eV for U235
Reading tallies XML file...
Writing summary.h5 file...
Time to matexp: 0.1556260585784912
Traceback (most recent call last):
File "restart.py", line 20, in <module>
openmc.deplete.integrator.predictor(op, [time_steps[0]], power)
File "/home/drew/openmc/openmc/deplete/integrator/predictor.py", line 100, in predictor
op_results = [operator(x[0], power[-1])]
File "/home/drew/openmc/openmc/deplete/operator.py", line 175, in __call__
self.number.set_density(vec)
File "/home/drew/openmc/openmc/deplete/atom_number.py", line 214, in set_density
self.set_mat_slice(i, density_slice)
File "/home/drew/openmc/openmc/deplete/atom_number.py", line 199, in set_mat_slice
self[mat, :self.n_nuc_burn] = val
File "/home/drew/openmc/openmc/deplete/atom_number.py", line 104, in __setitem__
self.number[mat, nuc] = val
IndexError: index 0 is out of bounds for axis 0 with size 0
Reading I135 from /home/drew/nndc_hdf5/I135.h5
Reading Xe135 from /home/drew/nndc_hdf5/Xe135.h5
|
IndexError
|
def __init__(
self,
geometry,
settings,
chain_file=None,
prev_results=None,
diff_burnable_mats=False,
fission_q=None,
dilute_initial=1.0e3,
):
super().__init__(chain_file, fission_q, dilute_initial)
self.round_number = False
self.prev_res = None
self.settings = settings
self.geometry = geometry
self.diff_burnable_mats = diff_burnable_mats
# Differentiate burnable materials with multiple instances
if self.diff_burnable_mats:
self._differentiate_burnable_mats()
# Clear out OpenMC, create task lists, distribute
openmc.reset_auto_ids()
self.burnable_mats, volume, nuclides = self._get_burnable_mats()
self.local_mats = _distribute(self.burnable_mats)
# Generate map from local materials => material index
self._mat_index_map = {lm: self.burnable_mats.index(lm) for lm in self.local_mats}
if prev_results is not None:
# Reload volumes into geometry
prev_results[-1].transfer_volumes(geometry)
# Store previous results in operator
# Distribute reaction rates according to those tracked
# on this process
if comm.size == 1:
self.prev_res = prev_results
else:
self.prev_res = ResultsList()
mat_indexes = _distribute(range(len(self.burnable_mats)))
for res_obj in prev_results:
new_res = res_obj.distribute(self.local_mats, mat_indexes)
self.prev_res.append(new_res)
# Determine which nuclides have incident neutron data
self.nuclides_with_data = self._get_nuclides_with_data()
# Select nuclides with data that are also in the chain
self._burnable_nucs = [
nuc.name for nuc in self.chain.nuclides if nuc.name in self.nuclides_with_data
]
# Extract number densities from the geometry / previous depletion run
self._extract_number(self.local_mats, volume, nuclides, self.prev_res)
# Create reaction rates array
self.reaction_rates = ReactionRates(
self.local_mats, self._burnable_nucs, self.chain.reactions
)
# Get classes to assist working with tallies
self._rate_helper = DirectReactionRateHelper(
self.reaction_rates.n_nuc, self.reaction_rates.n_react
)
self._energy_helper = ChainFissionHelper()
|
def __init__(
self,
geometry,
settings,
chain_file=None,
prev_results=None,
diff_burnable_mats=False,
fission_q=None,
dilute_initial=1.0e3,
):
super().__init__(chain_file, fission_q, dilute_initial)
self.round_number = False
self.settings = settings
self.geometry = geometry
self.diff_burnable_mats = diff_burnable_mats
if prev_results is not None:
# Reload volumes into geometry
prev_results[-1].transfer_volumes(geometry)
# Store previous results in operator
self.prev_res = prev_results
else:
self.prev_res = None
# Differentiate burnable materials with multiple instances
if self.diff_burnable_mats:
self._differentiate_burnable_mats()
# Clear out OpenMC, create task lists, distribute
openmc.reset_auto_ids()
self.burnable_mats, volume, nuclides = self._get_burnable_mats()
self.local_mats = _distribute(self.burnable_mats)
# Generate map from local materials => material index
self._mat_index_map = {lm: self.burnable_mats.index(lm) for lm in self.local_mats}
# Determine which nuclides have incident neutron data
self.nuclides_with_data = self._get_nuclides_with_data()
# Select nuclides with data that are also in the chain
self._burnable_nucs = [
nuc.name for nuc in self.chain.nuclides if nuc.name in self.nuclides_with_data
]
# Extract number densities from the geometry / previous depletion run
self._extract_number(self.local_mats, volume, nuclides, self.prev_res)
# Create reaction rates array
self.reaction_rates = ReactionRates(
self.local_mats, self._burnable_nucs, self.chain.reactions
)
# Get classes to assist working with tallies
self._rate_helper = DirectReactionRateHelper(
self.reaction_rates.n_nuc, self.reaction_rates.n_react
)
self._energy_helper = ChainFissionHelper()
|
https://github.com/openmc-dev/openmc/issues/1275
|
Reading c_H_in_H2O from /home/drew/nndc_hdf5/c_H_in_H2O.h5
Maximum neutron transport energy: 20000000.000000 eV for U235
Reading tallies XML file...
Writing summary.h5 file...
Time to matexp: 0.1556260585784912
Traceback (most recent call last):
File "restart.py", line 20, in <module>
openmc.deplete.integrator.predictor(op, [time_steps[0]], power)
File "/home/drew/openmc/openmc/deplete/integrator/predictor.py", line 100, in predictor
op_results = [operator(x[0], power[-1])]
File "/home/drew/openmc/openmc/deplete/operator.py", line 175, in __call__
self.number.set_density(vec)
File "/home/drew/openmc/openmc/deplete/atom_number.py", line 214, in set_density
self.set_mat_slice(i, density_slice)
File "/home/drew/openmc/openmc/deplete/atom_number.py", line 199, in set_mat_slice
self[mat, :self.n_nuc_burn] = val
File "/home/drew/openmc/openmc/deplete/atom_number.py", line 104, in __setitem__
self.number[mat, nuc] = val
IndexError: index 0 is out of bounds for axis 0 with size 0
Reading I135 from /home/drew/nndc_hdf5/I135.h5
Reading Xe135 from /home/drew/nndc_hdf5/Xe135.h5
|
IndexError
|
def load_from_statepoint(self, statepoint):
"""Extracts tallies in an OpenMC StatePoint with the data needed to
compute multi-group cross sections.
This method is needed to compute cross section data from tallies
in an OpenMC StatePoint object.
NOTE: The statepoint must first be linked with an OpenMC Summary object.
Parameters
----------
statepoint : openmc.StatePoint
An OpenMC StatePoint object with tally data
Raises
------
ValueError
When this method is called with a statepoint that has not been
linked with a summary object.
"""
cv.check_type("statepoint", statepoint, openmc.statepoint.StatePoint)
if statepoint.summary is None:
msg = (
"Unable to load data from a statepoint which has not been "
"linked with a summary file"
)
raise ValueError(msg)
# Override the domain object that loaded from an OpenMC summary file
# NOTE: This is necessary for micro cross-sections which require
# the isotopic number densities as computed by OpenMC
if self.domain_type == "cell" or self.domain_type == "distribcell":
self.domain = statepoint.summary.get_cell_by_id(self.domain.id)
elif self.domain_type == "universe":
self.domain = statepoint.summary.get_universe_by_id(self.domain.id)
elif self.domain_type == "material":
self.domain = statepoint.summary.get_material_by_id(self.domain.id)
else:
msg = (
"Unable to load data from a statepoint for domain type {0} "
"which is not yet supported".format(self.domain_type)
)
raise ValueError(msg)
# Use tally "slicing" to ensure that tallies correspond to our domain
# NOTE: This is important if tally merging was used
if self.domain_type != "distribcell":
filters = [self.domain_type]
filter_bins = [(self.domain.id,)]
# Distribcell filters only accept single cell - neglect it when slicing
else:
filters = []
filter_bins = []
# Clear any tallies previously loaded from a statepoint
if self.loaded_sp:
self._tallies = None
self._xs_tally = None
self._rxn_rate_tally = None
self._loaded_sp = False
# Find, slice and store Tallies from StatePoint
# The tally slicing is needed if tally merging was used
for tally_type, tally in self.tallies.items():
sp_tally = statepoint.get_tally(
tally.scores,
tally.filters,
tally.nuclides,
estimator=tally.estimator,
exact_filters=True,
)
sp_tally = sp_tally.get_slice(
tally.scores, filters, filter_bins, tally.nuclides
)
sp_tally.sparse = self.sparse
self.tallies[tally_type] = sp_tally
self._loaded_sp = True
|
def load_from_statepoint(self, statepoint):
"""Extracts tallies in an OpenMC StatePoint with the data needed to
compute multi-group cross sections.
This method is needed to compute cross section data from tallies
in an OpenMC StatePoint object.
NOTE: The statepoint must first be linked with an OpenMC Summary object.
Parameters
----------
statepoint : openmc.StatePoint
An OpenMC StatePoint object with tally data
Raises
------
ValueError
When this method is called with a statepoint that has not been
linked with a summary object.
"""
cv.check_type("statepoint", statepoint, openmc.statepoint.StatePoint)
if statepoint.summary is None:
msg = (
"Unable to load data from a statepoint which has not been "
"linked with a summary file"
)
raise ValueError(msg)
# Override the domain object that loaded from an OpenMC summary file
# NOTE: This is necessary for micro cross-sections which require
# the isotopic number densities as computed by OpenMC
if self.domain_type == "cell" or self.domain_type == "distribcell":
self.domain = statepoint.summary.get_cell_by_id(self.domain.id)
elif self.domain_type == "universe":
self.domain = statepoint.summary.get_universe_by_id(self.domain.id)
elif self.domain_type == "material":
self.domain = statepoint.summary.get_material_by_id(self.domain.id)
else:
msg = (
"Unable to load data from a statepoint for domain type {0} "
"which is not yet supported".format(self.domain_type)
)
raise ValueError(msg)
# Use tally "slicing" to ensure that tallies correspond to our domain
# NOTE: This is important if tally merging was used
if self.domain_type != "distribcell":
filters = [self.domain_type]
filter_bins = [(self.domain.id,)]
# Distribcell filters only accept single cell - neglect it when slicing
else:
filters = []
filter_bins = []
# Clear any tallies previously loaded from a statepoint
if self.loaded_sp:
self._tallies = None
self._xs_tally = None
self._rxn_rate_tally = None
self._loaded_sp = False
# Find, slice and store Tallies from StatePoint
# The tally slicing is needed if tally merging was used
for tally_type, tally in self.tallies.items():
sp_tally = statepoint.get_tally(
tally.scores,
tally.filters,
tally.nuclides,
estimator=tally.estimator,
exact=True,
)
sp_tally = sp_tally.get_slice(
tally.scores, filters, filter_bins, tally.nuclides
)
sp_tally.sparse = self.sparse
self.tallies[tally_type] = sp_tally
self._loaded_sp = True
|
https://github.com/openmc-dev/openmc/issues/663
|
# Initialize MGXS Library with OpenMC statepoint data
mgxs_lib.load_from_statepoint(sp)
---------------------------------------------------------------------------
LookupError Traceback (most recent call last)
<ipython-input-28-76d7abb36a81> in <module>()
1 # Initialize MGXS Library with OpenMC statepoint data
----> 2 mgxs_lib.load_from_statepoint(sp)
/home/shaner/.local/lib/python2.7/site-packages/openmc-0.7.1-py2.7.egg/openmc/mgxs/library.pyc in load_from_statepoint(self, statepoint)
446 for mgxs_type in self.mgxs_types:
447 mgxs = self.get_mgxs(domain, mgxs_type)
--> 448 mgxs.load_from_statepoint(statepoint)
449 mgxs.sparse = self.sparse
450
/home/shaner/.local/lib/python2.7/site-packages/openmc-0.7.1-py2.7.egg/openmc/mgxs/mgxs.pyc in load_from_statepoint(self, statepoint)
694 sp_tally = statepoint.get_tally(
695 tally.scores, tally.filters, tally.nuclides,
--> 696 estimator=tally.estimator, exact=True)
697 sp_tally = sp_tally.get_slice(
698 tally.scores, filters, filter_bins, tally.nuclides)
/home/shaner/.local/lib/python2.7/site-packages/openmc-0.7.1-py2.7.egg/openmc/statepoint.pyc in get_tally(self, scores, filters, nuclides, name, id, estimator, exact)
620 # If we did not find the Tally, return an error message
621 if tally is None:
--> 622 raise LookupError('Unable to get Tally')
623
624 return tally
LookupError: Unable to get Tally
|
LookupError
|
def get_tally(
    self,
    scores=None,
    filters=None,
    nuclides=None,
    name=None,
    id=None,
    estimator=None,
    exact_filters=False,
    exact_nuclides=False,
    exact_scores=False,
):
    """Find and return a Tally object with certain properties.

    This routine searches the list of Tallies and returns the first Tally
    found which satisfies all of the input parameters.

    NOTE: If any of the "exact" parameters are False (default), the input
    parameters do not need to match the complete Tally specification and
    may only represent a subset of the Tally's properties. If an "exact"
    parameter is True then the number of scores, filters, or nuclides in
    the parameters must precisely match those of any matching Tally.

    Parameters
    ----------
    scores : list, optional
        A list of one or more score strings (default is None, i.e. any).
    filters : list, optional
        A list of Filter objects (default is None, i.e. any).
    nuclides : list, optional
        A list of Nuclide objects (default is None, i.e. any).
    name : str, optional
        The name specified for the Tally (default is None).
    id : Integral, optional
        The id specified for the Tally (default is None).
    estimator : str, optional
        The type of estimator ('tracklength', 'analog'; default is None).
    exact_filters : bool
        If True, the number of filters in the parameters must be identical
        to those in the matching Tally.
    exact_nuclides : bool
        If True, the number of nuclides in the parameters must be identical
        to those in the matching Tally.
    exact_scores : bool
        If True, the number of scores in the parameters must be identical
        to those in the matching Tally.

    Returns
    -------
    tally : openmc.Tally
        A tally matching the specified criteria.

    Raises
    ------
    LookupError
        If a Tally meeting all of the input parameters cannot be found in
        the statepoint.
    """
    # Fix: the previous signature used mutable default arguments
    # (scores=[], ...); normalize None to fresh empty lists instead.
    scores = [] if scores is None else scores
    filters = [] if filters is None else filters
    nuclides = [] if nuclides is None else nuclides

    # Iterate over all tallies and return the first one satisfying every
    # requested criterion.
    for test_tally in self.tallies.values():
        # NOTE: falsy values (name="", id=0) are treated as "unspecified",
        # preserving the original truthiness-based behavior.
        if name and name != test_tally.name:
            continue
        if id and id != test_tally.id:
            continue
        if estimator and estimator != test_tally.estimator:
            continue

        # The number of filters, nuclides and scores must exactly match
        # when the corresponding "exact" flag is set.
        if exact_scores and len(scores) != test_tally.num_scores:
            continue
        if exact_nuclides and len(nuclides) != test_tally.num_nuclides:
            continue
        if exact_filters and len(filters) != test_tally.num_filters:
            continue

        # Every requested score must appear in the tally's scores.
        if scores and not all(s in test_tally.scores for s in scores):
            continue

        # For each requested filter, some tally filter must be a subset of
        # it (same call direction as the original: inner.is_subset(outer)).
        if filters and not all(
            any(tf.is_subset(rf) for tf in test_tally.filters)
            for rf in filters
        ):
            continue

        # Every requested nuclide must appear in the tally's nuclides.
        if nuclides and not all(n in test_tally.nuclides for n in nuclides):
            continue

        return test_tally

    # No tally satisfied all criteria.
    raise LookupError("Unable to get Tally")
|
def get_tally(
    self,
    scores=[],
    filters=[],
    nuclides=[],
    name=None,
    id=None,
    estimator=None,
    exact=False,
):
    """Find and return a Tally object with certain properties.

    Searches the list of Tallies and returns the first one that satisfies
    all of the input parameters.

    NOTE: If ``exact`` is False (default), the input parameters need only
    describe a subset of the Tally's properties. If ``exact`` is True, the
    scores, filters, nuclides and estimator parameters must precisely
    match those of any matching Tally.

    Parameters
    ----------
    scores : list, optional
        A list of one or more score strings (default is []).
    filters : list, optional
        A list of Filter objects (default is []).
    nuclides : list, optional
        A list of Nuclide objects (default is []).
    name : str, optional
        The name specified for the Tally (default is None).
    id : Integral, optional
        The id specified for the Tally (default is None).
    estimator : str, optional
        The type of estimator ('tracklength', 'analog'; default is None).
    exact : bool
        Whether to strictly enforce the match between the parameters and
        the returned tally.

    Returns
    -------
    tally : openmc.Tally
        A tally matching the specified criteria.

    Raises
    ------
    LookupError
        If no Tally meeting all of the input parameters can be found in
        the statepoint.
    """
    for candidate in self.tallies.values():
        # Name / id must match when specified (truthy).
        if name and name != candidate.name:
            continue
        if id and id != candidate.id:
            continue
        # With exact=True the estimator must match even when it is None.
        if (estimator or exact) and estimator != candidate.estimator:
            continue
        # Exact matching also requires identical cardinalities.
        if exact and (
            len(scores) != candidate.num_scores
            or len(nuclides) != candidate.num_nuclides
            or len(filters) != candidate.num_filters
        ):
            continue
        # Every requested score must be present in the candidate.
        if scores and not all(s in candidate.scores for s in scores):
            continue
        # Each requested filter must have some candidate filter that is a
        # subset of it (same direction as before: inner.is_subset(outer)).
        if filters and not all(
            any(inner.is_subset(requested) for inner in candidate.filters)
            for requested in filters
        ):
            continue
        # Every requested nuclide must be present in the candidate.
        if nuclides and not all(n in candidate.nuclides for n in nuclides):
            continue
        # First candidate meeting every criterion wins.
        return candidate

    raise LookupError("Unable to get Tally")
|
https://github.com/openmc-dev/openmc/issues/663
|
# Initialize MGXS Library with OpenMC statepoint data
mgxs_lib.load_from_statepoint(sp)
---------------------------------------------------------------------------
LookupError Traceback (most recent call last)
<ipython-input-28-76d7abb36a81> in <module>()
1 # Initialize MGXS Library with OpenMC statepoint data
----> 2 mgxs_lib.load_from_statepoint(sp)
/home/shaner/.local/lib/python2.7/site-packages/openmc-0.7.1-py2.7.egg/openmc/mgxs/library.pyc in load_from_statepoint(self, statepoint)
446 for mgxs_type in self.mgxs_types:
447 mgxs = self.get_mgxs(domain, mgxs_type)
--> 448 mgxs.load_from_statepoint(statepoint)
449 mgxs.sparse = self.sparse
450
/home/shaner/.local/lib/python2.7/site-packages/openmc-0.7.1-py2.7.egg/openmc/mgxs/mgxs.pyc in load_from_statepoint(self, statepoint)
694 sp_tally = statepoint.get_tally(
695 tally.scores, tally.filters, tally.nuclides,
--> 696 estimator=tally.estimator, exact=True)
697 sp_tally = sp_tally.get_slice(
698 tally.scores, filters, filter_bins, tally.nuclides)
/home/shaner/.local/lib/python2.7/site-packages/openmc-0.7.1-py2.7.egg/openmc/statepoint.pyc in get_tally(self, scores, filters, nuclides, name, id, estimator, exact)
620 # If we did not find the Tally, return an error message
621 if tally is None:
--> 622 raise LookupError('Unable to get Tally')
623
624 return tally
LookupError: Unable to get Tally
|
LookupError
|
def make_sentry_teller(env):
    """Build and return the ``tell_sentry`` error-reporting closure.

    When ``env.sentry_dsn`` is set, a raven (Sentry) client is configured;
    otherwise errors fall back to being printed on stderr. Returns a dict
    ``{"tell_sentry": tell_sentry}`` — presumably merged into the website
    state chain by the caller (not visible here; confirm against caller).
    """
    if env.sentry_dsn:
        # Only attach a release identifier for clean version strings; a "-"
        # in the version marks a non-release (e.g. dev) build.
        try:
            release = get_version()
            if "-" in release:
                release = None
        except Exception:
            release = None
        sentry = raven.Client(
            env.sentry_dsn,
            environment=env.instance_type,
            release=release,
        )
    else:
        sentry = None
        print("Won't log to Sentry (SENTRY_DSN is empty).")
    def tell_sentry(exception, state, allow_reraise=True):
        """Report *exception* to Sentry (or stderr) with request context.

        Returns a dict to merge into *state* (``sentry_ident`` or
        ``exception``), or None when the exception is deliberately ignored.
        NOTE(review): called from an ``except`` block — the bare ``raise``
        below re-raises the active exception.
        """
        if isinstance(exception, pando.Response) and exception.code < 500:
            # Only log server errors
            return
        if isinstance(exception, NeedDatabase):
            # Don't flood Sentry when DB is down
            return
        if isinstance(exception, PoolError):
            # If this happens, then the `DATABASE_MAXCONN` value is too low.
            state["exception"] = NeedDatabase()
        if isinstance(exception, psycopg2.Error):
            from liberapay.website import website
            if getattr(website, "db", None):
                # Probe the DB with a trivial query to classify the failure.
                try:
                    website.db.one("SELECT 1 AS x")
                except psycopg2.Error as e:
                    # If it can't answer this simple query, then it's either
                    # down or unreachable. Show the proper 503 error page.
                    website.db.okay = False
                    state["exception"] = NeedDatabase()
                    if sentry:
                        # Record the exception raised above instead of the
                        # original one, to avoid duplicate issues.
                        return tell_sentry(e, state, allow_reraise=True)
            if "read-only" in str(exception):
                # DB is in read only mode
                state["db_is_readonly"] = True
                # Show the proper 503 error page
                state["exception"] = NeedDatabase()
                # Don't reraise this in tests
                allow_reraise = False
        if isinstance(exception, ValueError):
            if "cannot contain NUL (0x00) characters" in str(exception):
                # https://github.com/liberapay/liberapay.com/issues/675
                response = state.get("response") or pando.Response()
                response.code = 400
                response.body = str(exception)
                return {"exception": None}
        if not sentry:
            # No Sentry, log to stderr instead
            traceback.print_exc()
            # Reraise if allowed
            if env.sentry_reraise and allow_reraise:
                raise
            return {"sentry_ident": None}
        # Prepare context data
        sentry_data = {}
        if state:
            # Context collection is best-effort; its own failures are
            # reported recursively with an empty state.
            try:
                sentry_data["tags"] = {
                    "lang": getattr(state.get("locale"), "language", None),
                }
                request = state.get("request")
                user_data = sentry_data["user"] = {}
                if request is not None:
                    user_data["ip_address"] = str(request.source)
                    # Headers are bytes; Cookie is excluded to avoid leaking
                    # session tokens to Sentry.
                    decode = lambda b: b.decode("ascii", "backslashreplace")
                    sentry_data["request"] = {
                        "method": request.method,
                        "url": request.line.uri.decoded,
                        "headers": {
                            decode(k): decode(b", ".join(v))
                            for k, v in request.headers.items()
                            if k != b"Cookie"
                        },
                    }
                user = state.get("user")
                if isinstance(user, Participant):
                    user_data["id"] = getattr(user, "id", None)
                    user_data["username"] = getattr(user, "username", None)
            except Exception as e:
                tell_sentry(e, {})
        # Tell Sentry
        result = sentry.captureException(data=sentry_data)
        # Put the Sentry id in the state for logging, etc
        return {"sentry_ident": sentry.get_ident(result)}
    # Expose the teller to the template engine's Undefined handler too.
    CustomUndefined._tell_sentry = staticmethod(tell_sentry)
    return {"tell_sentry": tell_sentry}
|
def make_sentry_teller(env):
    """Build and return the ``tell_sentry`` error-reporting closure.

    When ``env.sentry_dsn`` is set, a raven (Sentry) client is configured;
    otherwise errors fall back to being printed on stderr. Returns a dict
    ``{"tell_sentry": tell_sentry}`` — presumably merged into the website
    state chain by the caller (not visible here; confirm against caller).
    """
    if env.sentry_dsn:
        # Only attach a release identifier for clean version strings; a "-"
        # in the version marks a non-release (e.g. dev) build.
        try:
            release = get_version()
            if "-" in release:
                release = None
        except Exception:
            release = None
        sentry = raven.Client(
            env.sentry_dsn,
            environment=env.instance_type,
            release=release,
        )
    else:
        sentry = None
        print("Won't log to Sentry (SENTRY_DSN is empty).")
    def tell_sentry(exception, state, allow_reraise=True):
        """Report *exception* to Sentry (or stderr) with request context.

        Returns a dict to merge into *state* (``sentry_ident`` or
        ``exception``), or None when the exception is deliberately ignored.
        NOTE(review): called from an ``except`` block — the bare ``raise``
        below re-raises the active exception.
        """
        if isinstance(exception, pando.Response) and exception.code < 500:
            # Only log server errors
            return
        if isinstance(exception, NeedDatabase):
            # Don't flood Sentry when DB is down
            return
        if isinstance(exception, psycopg2.Error):
            from liberapay.website import website
            if getattr(website, "db", None):
                # Probe the DB with a trivial query to classify the failure.
                try:
                    website.db.one("SELECT 1 AS x")
                except psycopg2.Error as e:
                    # If it can't answer this simple query, then it's either
                    # down or unreachable. Show the proper 503 error page.
                    website.db.okay = False
                    state["exception"] = NeedDatabase()
                    if sentry:
                        # Record the exception raised above instead of the
                        # original one, to avoid duplicate issues.
                        return tell_sentry(e, state, allow_reraise=True)
            if "read-only" in str(exception):
                # DB is in read only mode
                state["db_is_readonly"] = True
                # Show the proper 503 error page
                state["exception"] = NeedDatabase()
                # Don't reraise this in tests
                allow_reraise = False
        if isinstance(exception, ValueError):
            if "cannot contain NUL (0x00) characters" in str(exception):
                # https://github.com/liberapay/liberapay.com/issues/675
                response = state.get("response") or pando.Response()
                response.code = 400
                response.body = str(exception)
                return {"exception": None}
        if not sentry:
            # No Sentry, log to stderr instead
            traceback.print_exc()
            # Reraise if allowed
            if env.sentry_reraise and allow_reraise:
                raise
            return {"sentry_ident": None}
        # Prepare context data
        sentry_data = {}
        if state:
            # Context collection is best-effort; its own failures are
            # reported recursively with an empty state.
            try:
                sentry_data["tags"] = {
                    "lang": getattr(state.get("locale"), "language", None),
                }
                request = state.get("request")
                user_data = sentry_data["user"] = {}
                if request is not None:
                    user_data["ip_address"] = str(request.source)
                    # Headers are bytes; Cookie is excluded to avoid leaking
                    # session tokens to Sentry.
                    decode = lambda b: b.decode("ascii", "backslashreplace")
                    sentry_data["request"] = {
                        "method": request.method,
                        "url": request.line.uri.decoded,
                        "headers": {
                            decode(k): decode(b", ".join(v))
                            for k, v in request.headers.items()
                            if k != b"Cookie"
                        },
                    }
                user = state.get("user")
                if isinstance(user, Participant):
                    user_data["id"] = getattr(user, "id", None)
                    user_data["username"] = getattr(user, "username", None)
            except Exception as e:
                tell_sentry(e, {})
        # Tell Sentry
        result = sentry.captureException(data=sentry_data)
        # Put the Sentry id in the state for logging, etc
        return {"sentry_ident": sentry.get_ident(result)}
    # Expose the teller to the template engine's Undefined handler too.
    CustomUndefined._tell_sentry = staticmethod(tell_sentry)
    return {"tell_sentry": tell_sentry}
https://github.com/liberapay/liberapay.com/issues/846
|
Traceback (most recent call last):
...
File "env/lib/python3.6/site-packages/postgres/__init__.py", line 451, in get_cursor
return CursorContextManager(self.pool, **kw)
File "env/lib/python3.6/site-packages/postgres/context_managers.py", line 35, in __init__
conn = self.pool.getconn()
File "env/lib/python3.6/site-packages/psycopg2_pool/__init__.py", line 236, in getconn
return super(ThreadSafeConnectionPool, self).getconn()
File "env/lib/python3.6/site-packages/psycopg2_pool/__init__.py", line 120, in getconn
raise PoolError("connection pool exhausted")
psycopg2_pool.PoolError: connection pool exhausted
|
psycopg2_pool.PoolError
|
def tell_sentry(exception, state, allow_reraise=True):
    """Report *exception* to Sentry (or stderr when Sentry is disabled).

    Client errors (HTTP < 500) and already-handled database outages are
    filtered out; psycopg2 errors trigger a connectivity probe. Returns a
    dict to merge into *state*, or None when the exception is ignored.
    """
    # HTTP responses below 500 are client errors: nothing to report.
    if isinstance(exception, pando.Response) and exception.code < 500:
        return
    # A known DB outage was already surfaced; don't flood Sentry.
    if isinstance(exception, NeedDatabase):
        return
    if isinstance(exception, PoolError):
        # Pool exhaustion means the `DATABASE_MAXCONN` value is too low;
        # show the 503 page.
        state["exception"] = NeedDatabase()
    if isinstance(exception, psycopg2.Error):
        from liberapay.website import website
        if getattr(website, "db", None):
            try:
                website.db.one("SELECT 1 AS x")
            except psycopg2.Error as probe_exc:
                # The DB can't answer a trivial query, so it's down or
                # unreachable: switch to the proper 503 page and report
                # the probe failure instead of the original exception,
                # to avoid duplicate Sentry issues.
                website.db.okay = False
                state["exception"] = NeedDatabase()
                if sentry:
                    return tell_sentry(probe_exc, state, allow_reraise=True)
        if "read-only" in str(exception):
            # Read-only DB: surface the 503 page and skip reraising in
            # tests.
            state["db_is_readonly"] = True
            state["exception"] = NeedDatabase()
            allow_reraise = False
    if isinstance(exception, ValueError) and "cannot contain NUL (0x00) characters" in str(exception):
        # https://github.com/liberapay/liberapay.com/issues/675
        response = state.get("response") or pando.Response()
        response.code = 400
        response.body = str(exception)
        return {"exception": None}
    if not sentry:
        # Sentry is disabled: dump the traceback to stderr and optionally
        # re-raise the active exception (bare `raise`).
        traceback.print_exc()
        if env.sentry_reraise and allow_reraise:
            raise
        return {"sentry_ident": None}

    def _decode(raw):
        return raw.decode("ascii", "backslashreplace")

    # Best-effort context collection; its own failures are reported
    # recursively with an empty state.
    sentry_data = {}
    if state:
        try:
            sentry_data["tags"] = {
                "lang": getattr(state.get("locale"), "language", None),
            }
            user_data = sentry_data["user"] = {}
            request = state.get("request")
            if request is not None:
                user_data["ip_address"] = str(request.source)
                # Cookie header is excluded to avoid leaking sessions.
                headers = {
                    _decode(k): _decode(b", ".join(v))
                    for k, v in request.headers.items()
                    if k != b"Cookie"
                }
                sentry_data["request"] = {
                    "method": request.method,
                    "url": request.line.uri.decoded,
                    "headers": headers,
                }
            user = state.get("user")
            if isinstance(user, Participant):
                user_data["id"] = getattr(user, "id", None)
                user_data["username"] = getattr(user, "username", None)
        except Exception as context_exc:
            tell_sentry(context_exc, {})
    result = sentry.captureException(data=sentry_data)
    # Hand the Sentry event id back for logging, error pages, etc.
    return {"sentry_ident": sentry.get_ident(result)}
|
def tell_sentry(exception, state, allow_reraise=True):
    """Report *exception* to Sentry (or stderr) with request context.

    Returns a dict to merge into *state* (``sentry_ident`` or
    ``exception``), or None when the exception is deliberately ignored.
    NOTE(review): relies on ``sentry`` and ``env`` from an enclosing
    scope, and on being called from an ``except`` block (the bare
    ``raise`` below re-raises the active exception).
    """
    if isinstance(exception, pando.Response) and exception.code < 500:
        # Only log server errors
        return
    if isinstance(exception, NeedDatabase):
        # Don't flood Sentry when DB is down
        return
    if isinstance(exception, psycopg2.Error):
        from liberapay.website import website
        if getattr(website, "db", None):
            # Probe the DB with a trivial query to classify the failure.
            try:
                website.db.one("SELECT 1 AS x")
            except psycopg2.Error as e:
                # If it can't answer this simple query, then it's either
                # down or unreachable. Show the proper 503 error page.
                website.db.okay = False
                state["exception"] = NeedDatabase()
                if sentry:
                    # Record the exception raised above instead of the
                    # original one, to avoid duplicate issues.
                    return tell_sentry(e, state, allow_reraise=True)
        if "read-only" in str(exception):
            # DB is in read only mode
            state["db_is_readonly"] = True
            # Show the proper 503 error page
            state["exception"] = NeedDatabase()
            # Don't reraise this in tests
            allow_reraise = False
    if isinstance(exception, ValueError):
        if "cannot contain NUL (0x00) characters" in str(exception):
            # https://github.com/liberapay/liberapay.com/issues/675
            response = state.get("response") or pando.Response()
            response.code = 400
            response.body = str(exception)
            return {"exception": None}
    if not sentry:
        # No Sentry, log to stderr instead
        traceback.print_exc()
        # Reraise if allowed
        if env.sentry_reraise and allow_reraise:
            raise
        return {"sentry_ident": None}
    # Prepare context data
    sentry_data = {}
    if state:
        # Context collection is best-effort; its own failures are reported
        # recursively with an empty state.
        try:
            sentry_data["tags"] = {
                "lang": getattr(state.get("locale"), "language", None),
            }
            request = state.get("request")
            user_data = sentry_data["user"] = {}
            if request is not None:
                user_data["ip_address"] = str(request.source)
                # Headers are bytes; Cookie is excluded to avoid leaking
                # session tokens to Sentry.
                decode = lambda b: b.decode("ascii", "backslashreplace")
                sentry_data["request"] = {
                    "method": request.method,
                    "url": request.line.uri.decoded,
                    "headers": {
                        decode(k): decode(b", ".join(v))
                        for k, v in request.headers.items()
                        if k != b"Cookie"
                    },
                }
            user = state.get("user")
            if isinstance(user, Participant):
                user_data["id"] = getattr(user, "id", None)
                user_data["username"] = getattr(user, "username", None)
        except Exception as e:
            tell_sentry(e, {})
    # Tell Sentry
    result = sentry.captureException(data=sentry_data)
    # Put the Sentry id in the state for logging, etc
    return {"sentry_ident": sentry.get_ident(result)}
|
https://github.com/liberapay/liberapay.com/issues/846
|
Traceback (most recent call last):
...
File "env/lib/python3.6/site-packages/postgres/__init__.py", line 451, in get_cursor
return CursorContextManager(self.pool, **kw)
File "env/lib/python3.6/site-packages/postgres/context_managers.py", line 35, in __init__
conn = self.pool.getconn()
File "env/lib/python3.6/site-packages/psycopg2_pool/__init__.py", line 236, in getconn
return super(ThreadSafeConnectionPool, self).getconn()
File "env/lib/python3.6/site-packages/psycopg2_pool/__init__.py", line 120, in getconn
raise PoolError("connection pool exhausted")
psycopg2_pool.PoolError: connection pool exhausted
|
psycopg2_pool.PoolError
|
def start(self, engine):
    """Configure the engine for the current game and tell it to think.

    Translates the enclosing ``play`` call's ``limit`` into xboard
    ``level``/``st``/``sd``/``time``/``otim`` commands, then issues
    ``post``/``nopost``, ``hard``/``easy`` and ``go``.

    Raises EngineError when node limits are mixed with time limits or the
    engine declared feature nps=0.
    """
    self.play_result = PlayResult(None, None)
    self.stopped = False
    # Pong markers; filled in later when a ping is issued after the move
    # or after pondering is stopped.
    self.pong_after_move = None
    self.pong_after_ponder = None

    # Set game, position and configure.
    engine._new(board, game, options)

    # Limit or time control.
    increment = limit.white_inc if board.turn else limit.black_inc
    if limit.remaining_moves or increment:
        base_mins, base_secs = divmod(
            int(limit.white_clock if board.turn else limit.black_clock), 60
        )
        engine.send_line(
            "level {} {}:{:02d} {}".format(
                limit.remaining_moves or 0, base_mins, base_secs, increment
            )
        )

    if limit.nodes is not None:
        if (
            limit.time is not None
            or limit.white_clock is not None
            or limit.black_clock is not None
            or increment is not None
        ):
            raise EngineError(
                "xboard does not support mixing node limits with time limits"
            )

        if "nps" not in engine.features:
            # Fix: the "%s" placeholder previously had no argument, which
            # makes the logging call itself fail; pass the engine.
            LOGGER.warning(
                "%s: Engine did not declare explicit support for node limits (feature nps=?)",
                engine,
            )
        elif not engine.features["nps"]:
            raise EngineError(
                "xboard engine does not support node limits (feature nps=0)"
            )

        engine.send_line("nps 1")
        engine.send_line("st {}".format(int(limit.nodes)))

    if limit.depth is not None:
        engine.send_line("sd {}".format(limit.depth))
    if limit.time is not None:
        engine.send_line("st {}".format(limit.time))
    if limit.white_clock is not None:
        engine.send_line(
            "{} {}".format(
                "time" if board.turn else "otim", int(limit.white_clock * 100)
            )
        )
    if limit.black_clock is not None:
        engine.send_line(
            "{} {}".format(
                "otim" if board.turn else "time", int(limit.black_clock * 100)
            )
        )

    # Start thinking.
    engine.send_line("post" if info else "nopost")
    engine.send_line("hard" if ponder else "easy")
    engine.send_line("go")
|
def start(self, engine):
    """Configure the engine for the current game and tell it to think.

    Translates the enclosing ``play`` call's ``limit`` into xboard
    ``level``/``st``/``sd``/``time``/``otim`` commands, then issues
    ``post``/``nopost``, ``hard``/``easy`` and ``go``.

    Raises EngineError when node limits are mixed with time limits or the
    engine declared feature nps=0.
    """
    self.info = {}
    self.stopped = False
    # Pong marker for the final ping; set when the command is cancelled.
    self.final_pong = None
    self.draw_offered = False

    # Set game, position and configure.
    engine._new(board, game, options)

    # Limit or time control.
    increment = limit.white_inc if board.turn else limit.black_inc
    if limit.remaining_moves or increment:
        base_mins, base_secs = divmod(
            int(limit.white_clock if board.turn else limit.black_clock), 60
        )
        engine.send_line(
            "level {} {}:{:02d} {}".format(
                limit.remaining_moves or 0, base_mins, base_secs, increment
            )
        )

    if limit.nodes is not None:
        if (
            limit.time is not None
            or limit.white_clock is not None
            or limit.black_clock is not None
            or increment is not None
        ):
            raise EngineError(
                "xboard does not support mixing node limits with time limits"
            )

        if "nps" not in engine.features:
            # Fix: the "%s" placeholder previously had no argument, which
            # makes the logging call itself fail; pass the engine.
            LOGGER.warning(
                "%s: Engine did not declare explicit support for node limits (feature nps=?)",
                engine,
            )
        elif not engine.features["nps"]:
            raise EngineError(
                "xboard engine does not support node limits (feature nps=0)"
            )

        engine.send_line("nps 1")
        engine.send_line("st {}".format(int(limit.nodes)))

    if limit.depth is not None:
        engine.send_line("sd {}".format(limit.depth))
    if limit.time is not None:
        engine.send_line("st {}".format(limit.time))
    if limit.white_clock is not None:
        engine.send_line(
            "{} {}".format(
                "time" if board.turn else "otim", int(limit.white_clock * 100)
            )
        )
    if limit.black_clock is not None:
        engine.send_line(
            "{} {}".format(
                "otim" if board.turn else "time", int(limit.black_clock * 100)
            )
        )

    # Start thinking.
    engine.send_line("post" if info else "nopost")
    engine.send_line("hard" if ponder else "easy")
    engine.send_line("go")
|
https://github.com/niklasf/python-chess/issues/379
|
Exception in callback EngineProtocol.pipe_data_received(1, b'you play bo...Drawn game}\n')
handle: <Handle EngineProtocol.pipe_data_received(1, b'you play bo...Drawn game}\n')>
Traceback (most recent call last):
File "/usr/lib/python3.5/asyncio/events.py", line 126, in _run
self._callback(*self._args)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 602, in pipe_data_received
self._line_received(line)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 615, in _line_received
self.command._line_received(self, line)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 869, in _line_received
self.line_received(engine, line)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 1716, in line_received
self._move(engine, line.split(" ", 1)[1])
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 1748, in _move
self.result.set_result(PlayResult(move, None, self.info, self.draw_offered))
File "/usr/lib/python3.5/asyncio/futures.py", line 348, in set_result
raise InvalidStateError('{}: {!r}'.format(self._state, self))
asyncio.futures.InvalidStateError: FINISHED: <Future finished result=<PlayResult a...ffered=False)>>
Traceback (most recent call last):
File "./tournament.py", line 96, in <module>
board.push(result.move)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/__init__.py", line 1942, in push
move = self._to_chess960(move)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/__init__.py", line 3318, in _to_chess960
if move.from_square == E1 and self.kings & BB_E1:
AttributeError: 'NoneType' object has no attribute 'from_square'
|
asyncio.futures.InvalidStateError
|
def line_received(self, engine, line):
    """Dispatch a single line of xboard engine output for a play command."""
    if line.startswith("move "):
        self._move(engine, line.split(" ", 1)[1])
        return
    if line.startswith("Hint: "):
        self._hint(engine, line.split(" ", 1)[1])
        return
    if line == self.pong_after_move:
        # Handshake completed after the move: publish the result.
        if not self.result.done():
            self.result.set_result(self.play_result)
        if not ponder:
            self.set_finished()
        return
    if line == self.pong_after_ponder:
        # Handshake completed after pondering: the command is finished.
        if not self.result.done():
            self.result.set_result(self.play_result)
        self.set_finished()
        return
    if line == "offer draw":
        if not self.result.done():
            self.play_result.draw_offered = True
            self._ping_after_move(engine)
        return
    if line == "resign":
        if not self.result.done():
            self.play_result.resigned = True
            self._ping_after_move(engine)
        return
    if line.startswith(("1-0", "0-1", "1/2-1/2")):
        # Game result announcement: synchronize via a ping.
        self._ping_after_move(engine)
        return
    if line.startswith("#"):
        # Engine comment: ignore.
        return
    if len(line.split()) >= 4 and line.lstrip()[0].isdigit():
        # Looks like a thinking-output line.
        self._post(engine, line)
    else:
        LOGGER.warning("%s: Unexpected engine output: %s", engine, line)
|
def line_received(self, engine, line):
    """Handle one line of engine output during a play command.

    Bug fix: the "resign" branch called ``self.result.set_result`` without
    checking ``done()`` first; when another line (e.g. a game-result
    announcement) had already completed the future, asyncio raised
    ``InvalidStateError`` (see the traceback for python-chess issue #379).
    """
    if line.startswith("move "):
        self._move(engine, line.split(" ", 1)[1])
    elif line == self.final_pong:
        if not self.result.done():
            self.result.set_exception(
                EngineError("xboard engine answered final pong before sending move")
            )
        self.end(engine)
    elif line == "offer draw":
        self.draw_offered = True
    elif line == "resign":
        # Guard against the future already holding a result, to avoid
        # asyncio.InvalidStateError on a second completion.
        if not self.result.done():
            self.result.set_result(
                PlayResult(
                    None, None, self.info, draw_offered=self.draw_offered, resigned=True
                )
            )
        self.end(engine)
    elif line.startswith("1-0") or line.startswith("0-1") or line.startswith("1/2-1/2"):
        if not self.result.done():
            self.result.set_result(
                PlayResult(None, None, self.info, draw_offered=self.draw_offered)
            )
        self.end(engine)
    elif line.startswith("#") or line.startswith("Hint:"):
        # Engine comments and hints are ignored here.
        pass
    elif len(line.split()) >= 4 and line.lstrip()[0].isdigit():
        # Looks like a thinking-output line.
        self._post(engine, line)
    else:
        LOGGER.warning("%s: Unexpected engine output: %s", engine, line)
|
https://github.com/niklasf/python-chess/issues/379
|
Exception in callback EngineProtocol.pipe_data_received(1, b'you play bo...Drawn game}\n')
handle: <Handle EngineProtocol.pipe_data_received(1, b'you play bo...Drawn game}\n')>
Traceback (most recent call last):
File "/usr/lib/python3.5/asyncio/events.py", line 126, in _run
self._callback(*self._args)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 602, in pipe_data_received
self._line_received(line)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 615, in _line_received
self.command._line_received(self, line)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 869, in _line_received
self.line_received(engine, line)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 1716, in line_received
self._move(engine, line.split(" ", 1)[1])
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 1748, in _move
self.result.set_result(PlayResult(move, None, self.info, self.draw_offered))
File "/usr/lib/python3.5/asyncio/futures.py", line 348, in set_result
raise InvalidStateError('{}: {!r}'.format(self._state, self))
asyncio.futures.InvalidStateError: FINISHED: <Future finished result=<PlayResult a...ffered=False)>>
Traceback (most recent call last):
File "./tournament.py", line 96, in <module>
board.push(result.move)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/__init__.py", line 1942, in push
move = self._to_chess960(move)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/__init__.py", line 3318, in _to_chess960
if move.from_square == E1 and self.kings & BB_E1:
AttributeError: 'NoneType' object has no attribute 'from_square'
|
asyncio.futures.InvalidStateError
|
def cancel(self, engine):
    """Stop the search early and schedule the final ping/pong handshake."""
    if self.stopped:
        return
    self.stopped = True
    # Interrupt the search only when the result future was cancelled.
    if self.result.cancelled():
        engine.send_line("?")
    # Turn pondering off so the engine goes idle after answering.
    if ponder:
        engine.send_line("easy")
    token = (id(self) + 1) & 0xFFFF
    self.pong_after_ponder = "pong {}".format(token)
    engine._ping(token)
|
def cancel(self, engine):
    """Stop the search early and schedule the final ping/pong handshake."""
    if self.stopped:
        return
    self.stopped = True
    # Interrupt the search only when the result future was cancelled.
    if self.result.cancelled():
        engine.send_line("?")
    # Turn pondering off so the engine goes idle after answering.
    if ponder:
        engine.send_line("easy")
    token = id(self) & 0xFFFF
    self.final_pong = "pong {}".format(token)
    engine._ping(token)
|
https://github.com/niklasf/python-chess/issues/379
|
Exception in callback EngineProtocol.pipe_data_received(1, b'you play bo...Drawn game}\n')
handle: <Handle EngineProtocol.pipe_data_received(1, b'you play bo...Drawn game}\n')>
Traceback (most recent call last):
File "/usr/lib/python3.5/asyncio/events.py", line 126, in _run
self._callback(*self._args)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 602, in pipe_data_received
self._line_received(line)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 615, in _line_received
self.command._line_received(self, line)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 869, in _line_received
self.line_received(engine, line)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 1716, in line_received
self._move(engine, line.split(" ", 1)[1])
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 1748, in _move
self.result.set_result(PlayResult(move, None, self.info, self.draw_offered))
File "/usr/lib/python3.5/asyncio/futures.py", line 348, in set_result
raise InvalidStateError('{}: {!r}'.format(self._state, self))
asyncio.futures.InvalidStateError: FINISHED: <Future finished result=<PlayResult a...ffered=False)>>
Traceback (most recent call last):
File "./tournament.py", line 96, in <module>
board.push(result.move)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/__init__.py", line 1942, in push
move = self._to_chess960(move)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/__init__.py", line 3318, in _to_chess960
if move.from_square == E1 and self.kings & BB_E1:
AttributeError: 'NoneType' object has no attribute 'from_square'
|
asyncio.futures.InvalidStateError
|
def play(
    self,
    board,
    limit,
    *,
    game=None,
    info=INFO_NONE,
    ponder=False,
    root_moves=None,
    options={},
):
    """Play a move on *board* under *limit* using the xboard protocol.

    Configures the engine (game, position, options), translates *limit* into
    ``level``/``st``/``sd``/``time``/``otim`` commands, then sends ``go`` and
    assembles the engine's ``move ...`` (and, when pondering, ``Hint: ...``)
    lines into a PlayResult.

    NOTE(review): ``options={}`` is a mutable default argument -- safe only if
    nothing downstream mutates it; confirm.
    """
    if root_moves is not None:
        # Restricting root moves ("include") only exists in analysis mode.
        raise EngineError(
            "play with root_moves, but xboard supports 'include' only in analysis mode"
        )
    class Command(BaseCommand):
        def start(self, engine):
            # Result under construction; the handlers below fill in the move,
            # ponder move, latest info, and draw/resign flags.
            self.play_result = PlayResult(None, None)
            self.stopped = False
            # Expected "pong <n>" echoes used for synchronization: one after
            # the move arrived, one after a ponder search was cancelled.
            self.pong_after_move = None
            self.pong_after_ponder = None
            # Set game, position and configure.
            engine._new(board, game, options)
            # Limit or time control.
            increment = limit.white_inc if board.turn else limit.black_inc
            if limit.remaining_moves or increment:
                base_mins, base_secs = divmod(
                    int(limit.white_clock if board.turn else limit.black_clock), 60
                )
                engine.send_line(
                    "level {} {}:{:02d} {}".format(
                        limit.remaining_moves or 0, base_mins, base_secs, increment
                    )
                )
            if limit.nodes is not None:
                # Node limits are emulated via "nps 1" + "st"; they cannot be
                # combined with any wall-clock limit.
                if (
                    limit.time is not None
                    or limit.white_clock is not None
                    or limit.black_clock is not None
                    or increment is not None
                ):
                    raise EngineError(
                        "xboard does not support mixing node limits with time limits"
                    )
                if "nps" not in engine.features:
                    # NOTE(review): the "%s" placeholder has no argument, so
                    # the engine name is never interpolated -- confirm.
                    LOGGER.warning(
                        "%s: Engine did not declare explicit support for node limits (feature nps=?)"
                    )
                elif not engine.features["nps"]:
                    raise EngineError(
                        "xboard engine does not support node limits (feature nps=0)"
                    )
                # With "nps 1", "st" is interpreted as a node count.
                engine.send_line("nps 1")
                engine.send_line("st {}".format(int(limit.nodes)))
            if limit.depth is not None:
                engine.send_line("sd {}".format(limit.depth))
            if limit.time is not None:
                engine.send_line("st {}".format(limit.time))
            # Clocks are sent in centiseconds; "time" is the side to move.
            if limit.white_clock is not None:
                engine.send_line(
                    "{} {}".format(
                        "time" if board.turn else "otim", int(limit.white_clock * 100)
                    )
                )
            if limit.black_clock is not None:
                engine.send_line(
                    "{} {}".format(
                        "otim" if board.turn else "time", int(limit.black_clock * 100)
                    )
                )
            # Start thinking.
            engine.send_line("post" if info else "nopost")
            engine.send_line("hard" if ponder else "easy")
            engine.send_line("go")
        def line_received(self, engine, line):
            if line.startswith("move "):
                self._move(engine, line.split(" ", 1)[1])
            elif line.startswith("Hint: "):
                self._hint(engine, line.split(" ", 1)[1])
            elif line == self.pong_after_move:
                # Engine confirmed it processed everything up to the move.
                if not self.result.done():
                    self.result.set_result(self.play_result)
                if not ponder:
                    self.set_finished()
            elif line == self.pong_after_ponder:
                # Ponder search confirmed stopped; the command is fully done.
                if not self.result.done():
                    self.result.set_result(self.play_result)
                self.set_finished()
            elif line == "offer draw":
                if not self.result.done():
                    self.play_result.draw_offered = True
                self._ping_after_move(engine)
            elif line == "resign":
                if not self.result.done():
                    self.play_result.resigned = True
                self._ping_after_move(engine)
            elif (
                line.startswith("1-0")
                or line.startswith("0-1")
                or line.startswith("1/2-1/2")
            ):
                # Game-end announcement; synchronize before finishing.
                self._ping_after_move(engine)
            elif line.startswith("#"):
                # Comment line from the engine; ignore.
                pass
            elif len(line.split()) >= 4 and line.lstrip()[0].isdigit():
                # Looks like thinking output: "ply score time nodes pv ...".
                self._post(engine, line)
            else:
                LOGGER.warning("%s: Unexpected engine output: %s", engine, line)
        def _post(self, engine, line):
            # Keep only the latest info; ignore output once the result is set.
            if not self.result.done():
                self.play_result.info = _parse_xboard_post(line, engine.board, info)
        def _move(self, engine, arg):
            if not self.result.done() and self.play_result.move is None:
                try:
                    self.play_result.move = engine.board.push_xboard(arg)
                except ValueError as err:
                    self.result.set_exception(EngineError(err))
                else:
                    self._ping_after_move(engine)
            else:
                # Duplicate/late move (e.g. after cancellation): keep the
                # internal board in sync but do not touch the result.
                try:
                    engine.board.push_xboard(arg)
                except ValueError:
                    LOGGER.exception("exception playing unexpected move")
        def _hint(self, engine, arg):
            # A hint is only meaningful once we have a move but no ponder yet.
            if (
                not self.result.done()
                and self.play_result.move is not None
                and self.play_result.ponder is None
            ):
                try:
                    self.play_result.ponder = engine.board.parse_xboard(arg)
                except ValueError:
                    LOGGER.exception("exception parsing hint")
            else:
                LOGGER.warning("unexpected hint: %r", arg)
        def _ping_after_move(self, engine):
            # Send a single ping so the eventual echoed "pong" tells us the
            # engine has flushed everything related to the move.
            if self.pong_after_move is None:
                n = id(self) & 0xFFFF
                self.pong_after_move = "pong {}".format(n)
                engine._ping(n)
        def cancel(self, engine):
            if self.stopped:
                return
            self.stopped = True
            if self.result.cancelled():
                # Interrupt the search ("?" means move now).
                engine.send_line("?")
            if ponder:
                # Stop pondering and synchronize with a second ping.
                engine.send_line("easy")
                n = (id(self) + 1) & 0xFFFF
                self.pong_after_ponder = "pong {}".format(n)
                engine._ping(n)
    return (yield from self.communicate(Command))
|
def play(
    self,
    board,
    limit,
    *,
    game=None,
    info=INFO_NONE,
    ponder=False,
    root_moves=None,
    options={},
):
    """Play a move on *board* under *limit* using the xboard protocol.

    Configures the engine (game, position, options), translates *limit* into
    ``level``/``st``/``sd``/``time``/``otim`` commands, then sends ``go`` and
    resolves the result from the engine's "move ..." line.

    NOTE(review): ``options={}`` is a mutable default argument -- safe only if
    nothing downstream mutates it; confirm.
    NOTE(review): ``_move()`` guards only with ``self.result.cancelled()``;
    a duplicate "move" line (or one arriving after a game-result line) calls
    ``set_result`` on a FINISHED future and raises
    ``asyncio.InvalidStateError`` -- see the issue #379 traceback above.
    """
    if root_moves is not None:
        # Restricting root moves ("include") only exists in analysis mode.
        raise EngineError(
            "play with root_moves, but xboard supports include only in analysis mode"
        )
    class Command(BaseCommand):
        def start(self, engine):
            # Latest thinking-output info; attached to the final PlayResult.
            self.info = {}
            self.stopped = False
            # Expected "pong <n>" echo sent while cancelling/stopping.
            self.final_pong = None
            self.draw_offered = False
            # Set game, position and configure.
            engine._new(board, game, options)
            # Limit or time control.
            increment = limit.white_inc if board.turn else limit.black_inc
            if limit.remaining_moves or increment:
                base_mins, base_secs = divmod(
                    int(limit.white_clock if board.turn else limit.black_clock), 60
                )
                engine.send_line(
                    "level {} {}:{:02d} {}".format(
                        limit.remaining_moves or 0, base_mins, base_secs, increment
                    )
                )
            if limit.nodes is not None:
                # Node limits are emulated via "nps 1" + "st"; they cannot be
                # combined with any wall-clock limit.
                if (
                    limit.time is not None
                    or limit.white_clock is not None
                    or limit.black_clock is not None
                    or increment is not None
                ):
                    raise EngineError(
                        "xboard does not support mixing node limits with time limits"
                    )
                if "nps" not in engine.features:
                    # NOTE(review): the "%s" placeholder has no argument, so
                    # the engine name is never interpolated -- confirm.
                    LOGGER.warning(
                        "%s: Engine did not declare explicit support for node limits (feature nps=?)"
                    )
                elif not engine.features["nps"]:
                    raise EngineError(
                        "xboard engine does not support node limits (feature nps=0)"
                    )
                # With "nps 1", "st" is interpreted as a node count.
                engine.send_line("nps 1")
                engine.send_line("st {}".format(int(limit.nodes)))
            if limit.depth is not None:
                engine.send_line("sd {}".format(limit.depth))
            if limit.time is not None:
                engine.send_line("st {}".format(limit.time))
            # Clocks are sent in centiseconds; "time" is the side to move.
            if limit.white_clock is not None:
                engine.send_line(
                    "{} {}".format(
                        "time" if board.turn else "otim", int(limit.white_clock * 100)
                    )
                )
            if limit.black_clock is not None:
                engine.send_line(
                    "{} {}".format(
                        "otim" if board.turn else "time", int(limit.black_clock * 100)
                    )
                )
            # Start thinking.
            engine.send_line("post" if info else "nopost")
            engine.send_line("hard" if ponder else "easy")
            engine.send_line("go")
        def line_received(self, engine, line):
            if line.startswith("move "):
                self._move(engine, line.split(" ", 1)[1])
            elif line == self.final_pong:
                # Pong arrived before any move: the engine ignored the stop.
                if not self.result.done():
                    self.result.set_exception(
                        EngineError(
                            "xboard engine answered final pong before sending move"
                        )
                    )
                self.end(engine)
            elif line == "offer draw":
                self.draw_offered = True
            elif line == "resign":
                # NOTE(review): no done() guard here; a resign after the
                # result is already set would raise InvalidStateError --
                # confirm.
                self.result.set_result(
                    PlayResult(
                        None,
                        None,
                        self.info,
                        draw_offered=self.draw_offered,
                        resigned=True,
                    )
                )
                self.end(engine)
            elif (
                line.startswith("1-0")
                or line.startswith("0-1")
                or line.startswith("1/2-1/2")
            ):
                # Game-end announcement without a move.
                if not self.result.done():
                    self.result.set_result(
                        PlayResult(
                            None, None, self.info, draw_offered=self.draw_offered
                        )
                    )
                self.end(engine)
            elif line.startswith("#") or line.startswith("Hint:"):
                # Comments and ponder hints are ignored in this version.
                pass
            elif len(line.split()) >= 4 and line.lstrip()[0].isdigit():
                # Looks like thinking output: "ply score time nodes pv ...".
                self._post(engine, line)
            else:
                LOGGER.warning("%s: Unexpected engine output: %s", engine, line)
        def _post(self, engine, line):
            # Keep only the latest info; ignore output once the result is set.
            if not self.result.done():
                self.info = _parse_xboard_post(line, engine.board, info)
        def _move(self, engine, arg):
            # NOTE(review): only cancelled() is checked, so a second "move"
            # line calls set_result on a FINISHED future and raises
            # asyncio.InvalidStateError (issue #379) -- should check done().
            if not self.result.cancelled():
                try:
                    move = engine.board.push_xboard(arg)
                except ValueError as err:
                    self.result.set_exception(EngineError(err))
                else:
                    self.result.set_result(
                        PlayResult(
                            move, None, self.info, draw_offered=self.draw_offered
                        )
                    )
            if not ponder:
                self.end(engine)
        def cancel(self, engine):
            if self.stopped:
                return
            self.stopped = True
            if self.result.cancelled():
                # Interrupt the search ("?" means move now).
                engine.send_line("?")
            if ponder:
                engine.send_line("easy")
            # Synchronize with a ping; the echoed pong marks the end.
            n = id(self) & 0xFFFF
            self.final_pong = "pong {}".format(n)
            engine._ping(n)
        def end(self, engine):
            self.set_finished()
    return (yield from self.communicate(Command))
|
https://github.com/niklasf/python-chess/issues/379
|
Exception in callback EngineProtocol.pipe_data_received(1, b'you play bo...Drawn game}\n')
handle: <Handle EngineProtocol.pipe_data_received(1, b'you play bo...Drawn game}\n')>
Traceback (most recent call last):
File "/usr/lib/python3.5/asyncio/events.py", line 126, in _run
self._callback(*self._args)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 602, in pipe_data_received
self._line_received(line)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 615, in _line_received
self.command._line_received(self, line)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 869, in _line_received
self.line_received(engine, line)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 1716, in line_received
self._move(engine, line.split(" ", 1)[1])
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 1748, in _move
self.result.set_result(PlayResult(move, None, self.info, self.draw_offered))
File "/usr/lib/python3.5/asyncio/futures.py", line 348, in set_result
raise InvalidStateError('{}: {!r}'.format(self._state, self))
asyncio.futures.InvalidStateError: FINISHED: <Future finished result=<PlayResult a...ffered=False)>>
Traceback (most recent call last):
File "./tournament.py", line 96, in <module>
board.push(result.move)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/__init__.py", line 1942, in push
move = self._to_chess960(move)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/__init__.py", line 3318, in _to_chess960
if move.from_square == E1 and self.kings & BB_E1:
AttributeError: 'NoneType' object has no attribute 'from_square'
|
asyncio.futures.InvalidStateError
|
def _move(self, engine, arg):
    """Handle a "move ..." line: record the first move, replay any extras."""
    expecting_move = not self.result.done() and self.play_result.move is None
    if not expecting_move:
        # Late or duplicate move: keep the internal board in sync but
        # leave the (already settled) result alone.
        try:
            engine.board.push_xboard(arg)
        except ValueError:
            LOGGER.exception("exception playing unexpected move")
        return
    try:
        parsed = engine.board.push_xboard(arg)
    except ValueError as err:
        self.result.set_exception(EngineError(err))
    else:
        self.play_result.move = parsed
        self._ping_after_move(engine)
|
def _move(self, engine, arg):
    """Handle a "move ..." line from the engine and settle the result.

    Guards with ``self.result.done()`` rather than only ``cancelled()``:
    engines may emit a second "move" line, or a game-result/resign line may
    have settled the future first, and calling ``set_result`` on a FINISHED
    future raises ``asyncio.InvalidStateError``
    (https://github.com/niklasf/python-chess/issues/379).
    """
    if not self.result.done():
        try:
            move = engine.board.push_xboard(arg)
        except ValueError as err:
            self.result.set_exception(EngineError(err))
        else:
            self.result.set_result(
                PlayResult(move, None, self.info, draw_offered=self.draw_offered)
            )
    else:
        # Duplicate/late move: just keep the internal board in sync.
        try:
            engine.board.push_xboard(arg)
        except ValueError:
            LOGGER.exception("exception playing unexpected move")
    if not ponder:
        self.end(engine)
|
https://github.com/niklasf/python-chess/issues/379
|
Exception in callback EngineProtocol.pipe_data_received(1, b'you play bo...Drawn game}\n')
handle: <Handle EngineProtocol.pipe_data_received(1, b'you play bo...Drawn game}\n')>
Traceback (most recent call last):
File "/usr/lib/python3.5/asyncio/events.py", line 126, in _run
self._callback(*self._args)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 602, in pipe_data_received
self._line_received(line)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 615, in _line_received
self.command._line_received(self, line)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 869, in _line_received
self.line_received(engine, line)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 1716, in line_received
self._move(engine, line.split(" ", 1)[1])
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 1748, in _move
self.result.set_result(PlayResult(move, None, self.info, self.draw_offered))
File "/usr/lib/python3.5/asyncio/futures.py", line 348, in set_result
raise InvalidStateError('{}: {!r}'.format(self._state, self))
asyncio.futures.InvalidStateError: FINISHED: <Future finished result=<PlayResult a...ffered=False)>>
Traceback (most recent call last):
File "./tournament.py", line 96, in <module>
board.push(result.move)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/__init__.py", line 1942, in push
move = self._to_chess960(move)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/__init__.py", line 3318, in _to_chess960
if move.from_square == E1 and self.kings & BB_E1:
AttributeError: 'NoneType' object has no attribute 'from_square'
|
asyncio.futures.InvalidStateError
|
def _post(self, engine, line):
    """Parse a thinking-output line into the pending play result's info."""
    if self.result.done():
        # Result already settled; drop late output.
        return
    self.play_result.info = _parse_xboard_post(line, engine.board, info)
|
def _post(self, engine, line):
    """Parse a thinking-output line and remember it as the latest info."""
    if self.result.done():
        # Result already settled; drop late output.
        return
    self.info = _parse_xboard_post(line, engine.board, info)
|
https://github.com/niklasf/python-chess/issues/379
|
Exception in callback EngineProtocol.pipe_data_received(1, b'you play bo...Drawn game}\n')
handle: <Handle EngineProtocol.pipe_data_received(1, b'you play bo...Drawn game}\n')>
Traceback (most recent call last):
File "/usr/lib/python3.5/asyncio/events.py", line 126, in _run
self._callback(*self._args)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 602, in pipe_data_received
self._line_received(line)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 615, in _line_received
self.command._line_received(self, line)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 869, in _line_received
self.line_received(engine, line)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 1716, in line_received
self._move(engine, line.split(" ", 1)[1])
File "/home/pascal/.local/lib/python3.5/site-packages/chess/engine.py", line 1748, in _move
self.result.set_result(PlayResult(move, None, self.info, self.draw_offered))
File "/usr/lib/python3.5/asyncio/futures.py", line 348, in set_result
raise InvalidStateError('{}: {!r}'.format(self._state, self))
asyncio.futures.InvalidStateError: FINISHED: <Future finished result=<PlayResult a...ffered=False)>>
Traceback (most recent call last):
File "./tournament.py", line 96, in <module>
board.push(result.move)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/__init__.py", line 1942, in push
move = self._to_chess960(move)
File "/home/pascal/.local/lib/python3.5/site-packages/chess/__init__.py", line 3318, in _to_chess960
if move.from_square == E1 and self.kings & BB_E1:
AttributeError: 'NoneType' object has no attribute 'from_square'
|
asyncio.futures.InvalidStateError
|
async def content_as_text(self, max_concurrency=1, encoding="UTF-8"):
    """Download the contents of this blob, and decode as text.

    This operation is blocking until all data is downloaded.

    :param int max_concurrency:
        The number of parallel connections with which to download.
    :param str encoding:
        Text encoding to decode the downloaded bytes. Default is UTF-8.
    :rtype: str
    """
    # Deprecated thin wrapper: stash the options and delegate to readall().
    deprecation_message = "content_as_text is deprecated, use readall instead"
    warnings.warn(deprecation_message, DeprecationWarning)
    self._encoding = encoding
    self._max_concurrency = max_concurrency
    return await self.readall()
|
async def content_as_text(self, max_concurrency=1, encoding="UTF-8"):
    """Download the contents of this blob, and decode as text.

    This operation is blocking until all data is downloaded.

    :param int max_concurrency:
        The number of parallel connections with which to download.
    :param str encoding:
        Text encoding to decode the downloaded bytes. Default is UTF-8.
    :rtype: str
    """
    # Deprecated thin wrapper: stash the options and delegate to readall().
    warnings.warn(
        "content_as_text is deprecated, use readall instead", DeprecationWarning
    )
    self._max_concurrency = max_concurrency
    self._encoding = encoding
    return await self.readall()
|
https://github.com/Azure/azure-sdk-for-python/issues/14319
|
ERROR:Task exception was never retrieved
future: <Task finished coro=<_AsyncChunkDownloader.process_chunk() done, defined at /usr/local/lib/python3.6/dist-packages/azure/storage/blob/aio/_download_async.py:53> exception=ResourceModifiedError('The condition specified using HTTP conditional header(s) is not met.\nRequestId:XXX\nTime:2020-10-06T03:09:34.2866006Z\nErrorCode:ConditionNotMet\nError:None',)>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/azure/storage/blob/aio/_download_async.py", line 101, in _download_chunk
**self.request_options
File "/usr/local/lib/python3.6/dist-packages/azure/storage/blob/_generated/aio/operations_async/_blob_operations_async.py", line 180, in download
raise models.StorageErrorException(response, self._deserialize)
azure.storage.blob._generated.models._models_py3.StorageErrorException: Operation returned an invalid status 'The condition specified using HTTP conditional header(s) is not met.'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/azure/storage/blob/aio/_download_async.py", line 55, in process_chunk
chunk_data = await self._download_chunk(chunk_start, chunk_end - 1)
File "/usr/local/lib/python3.6/dist-packages/azure/storage/blob/aio/_download_async.py", line 104, in _download_chunk
process_storage_error(error)
File "/usr/local/lib/python3.6/dist-packages/azure/storage/blob/_shared/response_handlers.py", line 147, in process_storage_error
raise error
azure.core.exceptions.ResourceModifiedError: The condition specified using HTTP conditional header(s) is not met.
Time:2020-10-06T03:09:34.2866006Z
ErrorCode:ConditionNotMet
Error:None
|
azure.storage.blob._generated.models._models_py3.StorageErrorException
|
async def readinto(self, stream):
    """Download the contents of this blob to a stream.

    :param stream:
        The stream to download to. This can be an open file-handle,
        or any writable stream. The stream must be seekable if the download
        uses more than one parallel connection.
    :returns: The number of bytes read.
    :rtype: int
    """
    # the stream must be seekable if parallel download is required
    parallel = self._max_concurrency > 1
    if parallel:
        error_message = "Target stream handle must be seekable."
        if sys.version_info >= (3,) and not stream.seekable():
            raise ValueError(error_message)
        try:
            # Seek-to-current probe for streams without a seekable() API.
            stream.seek(stream.tell())
        except (NotImplementedError, AttributeError):
            raise ValueError(error_message)
    # Write the content to the user stream
    stream.write(self._current_content)
    if self._download_complete:
        # Everything arrived with the initial request; no chunking needed.
        return self.size
    data_end = self._file_size
    if self._end_range is not None:
        # Use the length unless it is over the end of the file
        data_end = min(self._file_size, self._end_range + 1)
    downloader = _AsyncChunkDownloader(
        client=self._clients.blob,
        non_empty_ranges=self._non_empty_ranges,
        total_size=self.size,
        chunk_size=self._config.max_chunk_get_size,
        current_progress=self._first_get_size,
        start_range=self._initial_range[1] + 1,  # start where the first download ended
        end_range=data_end,
        stream=stream,
        parallel=parallel,
        validate_content=self._validate_content,
        encryption_options=self._encryption_options,
        use_location=self._location_mode,
        **self._request_options,
    )
    # Keep at most max_concurrency chunk downloads in flight at once.
    dl_tasks = downloader.get_chunk_offsets()
    running_futures = [
        asyncio.ensure_future(downloader.process_chunk(d))
        for d in islice(dl_tasks, 0, self._max_concurrency)
    ]
    while running_futures:
        # Wait for some download to finish before adding a new one
        done, running_futures = await asyncio.wait(
            running_futures, return_when=asyncio.FIRST_COMPLETED
        )
        # Retrieve each finished task's result so that chunk failures are
        # re-raised here instead of being logged as "exception was never
        # retrieved" by the event loop.
        try:
            for task in done:
                task.result()
        except HttpResponseError as error:
            process_storage_error(error)
        try:
            next_chunk = next(dl_tasks)
        except StopIteration:
            break
        else:
            running_futures.add(
                asyncio.ensure_future(downloader.process_chunk(next_chunk))
            )
    if running_futures:
        # Wait for the remaining downloads to finish
        done, _running_futures = await asyncio.wait(running_futures)
        # Surface any error from the final batch as well.
        try:
            for task in done:
                task.result()
        except HttpResponseError as error:
            process_storage_error(error)
    return self.size
|
async def readinto(self, stream):
    """Download the contents of this blob to a stream.

    :param stream:
        The stream to download to. This can be an open file-handle,
        or any writable stream. The stream must be seekable if the download
        uses more than one parallel connection.
    :returns: The number of bytes read.
    :rtype: int

    Fix: finished chunk tasks' results are now retrieved after each
    ``asyncio.wait`` so chunk-download failures surface as storage errors
    instead of being swallowed and logged by the event loop as
    "Task exception was never retrieved"
    (https://github.com/Azure/azure-sdk-for-python/issues/14319).
    """
    # the stream must be seekable if parallel download is required
    parallel = self._max_concurrency > 1
    if parallel:
        error_message = "Target stream handle must be seekable."
        if sys.version_info >= (3,) and not stream.seekable():
            raise ValueError(error_message)
        try:
            # Seek-to-current probe for streams without a seekable() API.
            stream.seek(stream.tell())
        except (NotImplementedError, AttributeError):
            raise ValueError(error_message)
    # Write the content to the user stream
    stream.write(self._current_content)
    if self._download_complete:
        # Everything arrived with the initial request; no chunking needed.
        return self.size
    data_end = self._file_size
    if self._end_range is not None:
        # Use the length unless it is over the end of the file
        data_end = min(self._file_size, self._end_range + 1)
    downloader = _AsyncChunkDownloader(
        client=self._clients.blob,
        non_empty_ranges=self._non_empty_ranges,
        total_size=self.size,
        chunk_size=self._config.max_chunk_get_size,
        current_progress=self._first_get_size,
        start_range=self._initial_range[1] + 1,  # start where the first download ended
        end_range=data_end,
        stream=stream,
        parallel=parallel,
        validate_content=self._validate_content,
        encryption_options=self._encryption_options,
        use_location=self._location_mode,
        **self._request_options,
    )
    # Keep at most max_concurrency chunk downloads in flight at once.
    dl_tasks = downloader.get_chunk_offsets()
    running_futures = [
        asyncio.ensure_future(downloader.process_chunk(d))
        for d in islice(dl_tasks, 0, self._max_concurrency)
    ]
    while running_futures:
        # Wait for some download to finish before adding a new one
        done, running_futures = await asyncio.wait(
            running_futures, return_when=asyncio.FIRST_COMPLETED
        )
        # Retrieve each finished task's result so that chunk failures are
        # re-raised here instead of being silently dropped.
        try:
            for task in done:
                task.result()
        except HttpResponseError as error:
            process_storage_error(error)
        try:
            next_chunk = next(dl_tasks)
        except StopIteration:
            break
        else:
            running_futures.add(
                asyncio.ensure_future(downloader.process_chunk(next_chunk))
            )
    if running_futures:
        # Wait for the remaining downloads to finish and check for errors.
        done, _running_futures = await asyncio.wait(running_futures)
        try:
            for task in done:
                task.result()
        except HttpResponseError as error:
            process_storage_error(error)
    return self.size
|
https://github.com/Azure/azure-sdk-for-python/issues/14319
|
ERROR:Task exception was never retrieved
future: <Task finished coro=<_AsyncChunkDownloader.process_chunk() done, defined at /usr/local/lib/python3.6/dist-packages/azure/storage/blob/aio/_download_async.py:53> exception=ResourceModifiedError('The condition specified using HTTP conditional header(s) is not met.\nRequestId:XXX\nTime:2020-10-06T03:09:34.2866006Z\nErrorCode:ConditionNotMet\nError:None',)>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/azure/storage/blob/aio/_download_async.py", line 101, in _download_chunk
**self.request_options
File "/usr/local/lib/python3.6/dist-packages/azure/storage/blob/_generated/aio/operations_async/_blob_operations_async.py", line 180, in download
raise models.StorageErrorException(response, self._deserialize)
azure.storage.blob._generated.models._models_py3.StorageErrorException: Operation returned an invalid status 'The condition specified using HTTP conditional header(s) is not met.'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/azure/storage/blob/aio/_download_async.py", line 55, in process_chunk
chunk_data = await self._download_chunk(chunk_start, chunk_end - 1)
File "/usr/local/lib/python3.6/dist-packages/azure/storage/blob/aio/_download_async.py", line 104, in _download_chunk
process_storage_error(error)
File "/usr/local/lib/python3.6/dist-packages/azure/storage/blob/_shared/response_handlers.py", line 147, in process_storage_error
raise error
azure.core.exceptions.ResourceModifiedError: The condition specified using HTTP conditional header(s) is not met.
Time:2020-10-06T03:09:34.2866006Z
ErrorCode:ConditionNotMet
Error:None
|
azure.storage.blob._generated.models._models_py3.StorageErrorException
|
async def download_to_stream(self, stream, max_concurrency=1):
    """Download the contents of this blob to a stream.

    :param stream:
        The stream to download to. This can be an open file-handle,
        or any writable stream. The stream must be seekable if the download
        uses more than one parallel connection.
    :param int max_concurrency:
        The number of parallel connections with which to download.
    :returns: The properties of the downloaded blob.
    :rtype: Any
    """
    # Deprecated thin wrapper: stash the concurrency setting, delegate the
    # actual transfer to readinto(), and hand back the blob properties.
    deprecation_message = "download_to_stream is deprecated, use readinto instead"
    warnings.warn(deprecation_message, DeprecationWarning)
    self._max_concurrency = max_concurrency
    await self.readinto(stream)
    return self.properties
|
async def download_to_stream(self, stream, max_concurrency=1):
    """Download the contents of this blob to a stream.

    :param stream:
        The stream to download to. This can be an open file-handle,
        or any writable stream. The stream must be seekable if the download
        uses more than one parallel connection.
    :param int max_concurrency:
        The number of parallel connections with which to download.
    :returns: The properties of the downloaded blob.
    :rtype: Any
    """
    # Deprecated thin wrapper: stash the concurrency setting and delegate
    # the actual transfer to readinto().
    warnings.warn(
        "download_to_stream is deprecated, use readinto instead", DeprecationWarning
    )
    self._max_concurrency = max_concurrency
    await self.readinto(stream)
    return self.properties
|
https://github.com/Azure/azure-sdk-for-python/issues/14319
|
ERROR:Task exception was never retrieved
future: <Task finished coro=<_AsyncChunkDownloader.process_chunk() done, defined at /usr/local/lib/python3.6/dist-packages/azure/storage/blob/aio/_download_async.py:53> exception=ResourceModifiedError('The condition specified using HTTP conditional header(s) is not met.\nRequestId:XXX\nTime:2020-10-06T03:09:34.2866006Z\nErrorCode:ConditionNotMet\nError:None',)>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/azure/storage/blob/aio/_download_async.py", line 101, in _download_chunk
**self.request_options
File "/usr/local/lib/python3.6/dist-packages/azure/storage/blob/_generated/aio/operations_async/_blob_operations_async.py", line 180, in download
raise models.StorageErrorException(response, self._deserialize)
azure.storage.blob._generated.models._models_py3.StorageErrorException: Operation returned an invalid status 'The condition specified using HTTP conditional header(s) is not met.'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/azure/storage/blob/aio/_download_async.py", line 55, in process_chunk
chunk_data = await self._download_chunk(chunk_start, chunk_end - 1)
File "/usr/local/lib/python3.6/dist-packages/azure/storage/blob/aio/_download_async.py", line 104, in _download_chunk
process_storage_error(error)
File "/usr/local/lib/python3.6/dist-packages/azure/storage/blob/_shared/response_handlers.py", line 147, in process_storage_error
raise error
azure.core.exceptions.ResourceModifiedError: The condition specified using HTTP conditional header(s) is not met.
Time:2020-10-06T03:09:34.2866006Z
ErrorCode:ConditionNotMet
Error:None
|
azure.storage.blob._generated.models._models_py3.StorageErrorException
|
def _create_pipeline(self, credential, **kwargs):
    # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
    """Build the HTTP transport pipeline and its configuration.

    :param credential:
        A token credential (anything with ``get_token``), a
        ``SharedKeyCredentialPolicy``, or ``None`` for anonymous access.
    :returns: A ``(config, pipeline)`` tuple.
    :raises TypeError: If *credential* is of an unsupported type.
    """
    self._credential_policy = None
    if hasattr(credential, "get_token"):
        self._credential_policy = BearerTokenCredentialPolicy(
            credential, STORAGE_OAUTH_SCOPE
        )
    elif isinstance(credential, SharedKeyCredentialPolicy):
        self._credential_policy = credential
    elif credential is not None:
        raise TypeError("Unsupported credential: {}".format(credential))
    config = kwargs.get("_configuration") or create_configuration(**kwargs)
    if kwargs.get("_pipeline"):
        # Caller supplied a prebuilt pipeline (e.g. shared between clients).
        return config, kwargs["_pipeline"]
    config.transport = kwargs.get("transport")  # type: ignore
    kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
    kwargs.setdefault("read_timeout", READ_TIMEOUT)
    if not config.transport:
        config.transport = RequestsTransport(**kwargs)
    # Policy order matters: policies listed after the retry policy run again
    # on every retry attempt. The credential policy sits after retry so each
    # attempt is signed afresh (fixes stale Date/Authorization headers on
    # long retries -- see issue 14067 "Request date header too old").
    policies = [
        QueueMessagePolicy(),
        config.proxy_policy,
        config.user_agent_policy,
        StorageContentValidation(),
        ContentDecodePolicy(response_encoding="utf-8"),
        RedirectPolicy(**kwargs),
        StorageHosts(hosts=self._hosts, **kwargs),
        config.retry_policy,
        config.headers_policy,
        StorageRequestHook(**kwargs),
        self._credential_policy,
        config.logging_policy,
        StorageResponseHook(**kwargs),
        DistributedTracingPolicy(**kwargs),
        HttpLoggingPolicy(**kwargs),
    ]
    if kwargs.get("_additional_pipeline_policies"):
        policies = policies + kwargs.get("_additional_pipeline_policies")
    return config, Pipeline(config.transport, policies=policies)
|
def _create_pipeline(self, credential, **kwargs):
    # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
    """Build the HTTP transport pipeline and its configuration.

    :param credential:
        A token credential (anything with ``get_token``), a
        ``SharedKeyCredentialPolicy``, or ``None`` for anonymous access.
    :returns: A ``(config, pipeline)`` tuple.
    :raises TypeError: If *credential* is of an unsupported type.

    Fix: the credential policy (and per-request hook) previously ran BEFORE
    the retry policy, so retried requests reused the original signed
    Date/Authorization header; after long backoffs the service rejected them
    with "Request date header too old" / AuthenticationFailed
    (https://github.com/Azure/azure-sdk-for-python/issues/14067). Policies
    after the retry policy run again per attempt, so signing is now per-try.
    """
    self._credential_policy = None
    if hasattr(credential, "get_token"):
        self._credential_policy = BearerTokenCredentialPolicy(
            credential, STORAGE_OAUTH_SCOPE
        )
    elif isinstance(credential, SharedKeyCredentialPolicy):
        self._credential_policy = credential
    elif credential is not None:
        raise TypeError("Unsupported credential: {}".format(credential))
    config = kwargs.get("_configuration") or create_configuration(**kwargs)
    if kwargs.get("_pipeline"):
        # Caller supplied a prebuilt pipeline (e.g. shared between clients).
        return config, kwargs["_pipeline"]
    config.transport = kwargs.get("transport")  # type: ignore
    kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
    kwargs.setdefault("read_timeout", READ_TIMEOUT)
    if not config.transport:
        config.transport = RequestsTransport(**kwargs)
    # Per-call policies first; everything after config.retry_policy is
    # re-executed on each retry attempt (including request signing).
    policies = [
        QueueMessagePolicy(),
        config.proxy_policy,
        config.user_agent_policy,
        StorageContentValidation(),
        ContentDecodePolicy(response_encoding="utf-8"),
        RedirectPolicy(**kwargs),
        StorageHosts(hosts=self._hosts, **kwargs),
        config.retry_policy,
        config.headers_policy,
        StorageRequestHook(**kwargs),
        self._credential_policy,
        config.logging_policy,
        StorageResponseHook(**kwargs),
        DistributedTracingPolicy(**kwargs),
        HttpLoggingPolicy(**kwargs),
    ]
    if kwargs.get("_additional_pipeline_policies"):
        policies = policies + kwargs.get("_additional_pipeline_policies")
    return config, Pipeline(config.transport, policies=policies)
|
https://github.com/Azure/azure-sdk-for-python/issues/14067
|
Fatal read error on socket transport
protocol: <asyncio.sslproto.SSLProtocol object at 0x7f1cf667a5c0>
transport: <_SelectorSocketTransport fd=121 read=polling write=<idle, bufsize=0>>
Traceback (most recent call last):
File "/home/azureuser/genfiles/external/python_runtime/python3/lib/python3.6/asyncio/selector_events.py", line 727, in _read_ready
data = self._sock.recv(self.max_size)
TimeoutError: [Errno 110] Connection timed out
ERROR 2020-09-08 16:08:45,961 customclass load_blob_file status=error, duration_ms=958266.8999999999
Traceback (most recent call last):
File "/home/azureuser/bin/azureuser/azure/storage/blob/aio/_download_async.py", line 271, in _initial_request
**self._request_options)
File "/home/azureuser/bin/azureuser/azure/storage/blob/_generated/aio/operations_async/_blob_operations_async.py", line 169, in download
raise models.StorageErrorException(response, self._deserialize)
azure.storage.blob._generated.models._models_py3.StorageErrorException: Operation returned an invalid status 'Server failed to authenticate the request. Make sure the value of Authorization header is formed correctly including the signature.'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/azureuser/bin/azureuser/services/storage/blob_storage/blob_loader_storage.py", line 128, in load_blob_file
file = self._hot_storage.load_file(file_id)
File "/home/azureuser/bin/azureuser/services/storage/blob_storage/azure_blob_storage.py", line 60, in load_file
loop=self._get_or_create_event_loop(),
File "/home/azureuser/genfiles/external/python_runtime/python3/lib/python3.6/concurrent/futures/_base.py", line 432, in result
return self.__get_result()
File "/home/azureuser/genfiles/external/python_runtime/python3/lib/python3.6/concurrent/futures/_base.py", line 384, in __get_result
raise self._exception
File "/home/azureuser/bin/azureuser/services/storage/blob_storage/azure_blob_storage.py", line 79, in _load_blob_async_into_byte_stream
storage_stream_downloader = await blob_client.download_blob()
File "/home/azureuser/bin/azureuser/azure/core/tracing/decorator_async.py", line 74, in wrapper_use_tracer
return await func(*args, **kwargs)
File "/home/azureuser/bin/azureuser/azure/storage/blob/aio/_blob_client_async.py", line 335, in download_blob
await downloader._setup() # pylint: disable=protected-access
File "/home/azureuser/bin/azureuser/azure/storage/blob/aio/_download_async.py", line 225, in _setup
self._response = await self._initial_request()
File "/home/azureuser/bin/azureuser/azure/storage/blob/aio/_download_async.py", line 306, in _initial_request
process_storage_error(error)
File "/home/azureuser/bin/azureuser/azure/storage/blob/_shared/response_handlers.py", line 147, in process_storage_error
raise error
azure.core.exceptions.ClientAuthenticationError: Server failed to authenticate the request. Make sure the value of Authorization header is formed correctly including the signature.
RequestId:4a1422e8-101e-0039-72fa-85b01b000000
Time:2020-09-08T16:08:45.9594457Z
ErrorCode:AuthenticationFailed
Error:None
AuthenticationErrorDetail:Request date header too old: 'Tue, 08 Sep 2020 15:52:47 GMT'
ERROR 2020-09-08 16:08:45,963 customclass loader_fetch_batch_load_data status=error, duration_ms=958596.6
Traceback (most recent call last):
File "/home/azureuser/bin/azureuser/azure/storage/blob/aio/_download_async.py", line 271, in _initial_request
**self._request_options)
File "/home/azureuser/bin/azureuser/azure/storage/blob/_generated/aio/operations_async/_blob_operations_async.py", line 169, in download
raise models.StorageErrorException(response, self._deserialize)
azure.storage.blob._generated.models._models_py3.StorageErrorException: Operation returned an invalid status 'Server failed to authenticate the request. Make sure the value of Authorization header is formed correctly including the signature.'
|
TimeoutError
|
def apply_gradients(self, grads_and_vars, name: Optional[str] = None, **kwargs):
    """Apply gradients to variables for each optimizer.

    On the first call to `apply_gradients()`, compute the mapping from variables to
    optimizers and cache it in the `self.var_opt_mapping` dict for serialization and
    faster access.
    """
    if self.var_opt_mapping is None:
        # Convert `grads_and_vars` to list so we can iterate multiple times over it
        grads_and_vars = list(grads_and_vars)
        self._compute_var_opt_mapping(grads_and_vars)
    # Split gradients and variables into a separate list for each optimizer.
    # Index len(self.pred_opt_pairs) is the slot for the default optimizer.
    grad_var_lists = [[] for _ in range(len(self.pred_opt_pairs) + 1)]
    for grad, var in grads_and_vars:
        if var.name in self.var_opt_mapping:
            grad_var_lists[self.var_opt_mapping[var.name]].append((grad, var))
    # Create slot variables outside the replica context so no variable creation
    # happens during the cross-replica merge below.
    with tf.init_scope():
        for optimizer, opt_grads_and_vars in zip(self.optimizers, grad_var_lists):
            # NOTE(review): slots are created for *all* variables on every
            # sub-optimizer (`grads_and_vars`, not `opt_grads_and_vars`) --
            # confirm this over-allocation is intended.
            optimizer._create_slots([v for (_, v) in grads_and_vars])
    # Defer the actual updates to merge_call so they run once in cross-replica
    # context -- presumably the fix for the MirroredStrategy IndexError shown
    # in the traceback below; verify against the referenced issue.
    return tf.distribute.get_replica_context().merge_call(
        self._apply_gradients, args=(grad_var_lists, name), kwargs=kwargs
    )
|
def apply_gradients(self, grads_and_vars, name: Optional[str] = None, **kwargs):
    """Apply gradients to variables for each optimizer.

    On the first call to `apply_gradients()`, compute the mapping from variables to
    optimizers and cache it in the `self.var_opt_mapping` dict for serialization and
    faster access.
    """
    if self.var_opt_mapping is None:
        # Convert `grads_and_vars` to list so we can iterate multiple times over it
        grads_and_vars = list(grads_and_vars)
        self._compute_var_opt_mapping(grads_and_vars)
    # Split gradients and variables into a separate list for each optimizer.
    # Index len(self.pred_opt_pairs) is the slot for the default optimizer.
    grad_var_lists = [[] for _ in range(len(self.pred_opt_pairs) + 1)]
    for grad, var in grads_and_vars:
        if var.name in self.var_opt_mapping:
            grad_var_lists[self.var_opt_mapping[var.name]].append((grad, var))
    # Apply gradients to each optimizer
    # NOTE(review): calling each sub-optimizer's apply_gradients() directly
    # from replica context appears to break under tf.distribute
    # MirroredStrategy -- the "list index out of range" traceback below shows
    # an empty value list reaching cross_device_ops.batch_reduce. Confirm
    # against the distributed-training issue referenced underneath.
    with tf.name_scope(self._name):
        train_ops = [
            optimizer.apply_gradients(opt_grads_and_vars, **kwargs)
            for optimizer, opt_grads_and_vars in zip(self.optimizers, grad_var_lists)
        ]
    return tf.group(*train_ops, name=name or "train_with_group")
|
https://github.com/larq/larq/issues/396
|
WARNING:tensorflow:There is non-GPU devices in `tf.distribute.Strategy`, not using nccl allreduce.
distributed training: False
Train on 60000 samples
60000/60000 [==============================] - 4s 61us/sample - loss: 8.2390
Successfully fitted model
distributed training: True
Train on 60000 samples
INFO:tensorflow:Error reported to Coordinator: list index out of range
Traceback (most recent call last):
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/training/coordinator.py", line 297, in stop_on_exception
yield
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/mirrored_strategy.py", line 190, in _call_for_each_replica
**merge_kwargs)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/optimizer_v2/optimizer_v2.py", line 446, in _distributed_apply
ds_reduce_util.ReduceOp.SUM, grads_and_vars)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/distribute_lib.py", line 1481, in batch_reduce_to
return self._batch_reduce_to(reduce_op, value_destination_pairs)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/mirrored_strategy.py", line 707, in _batch_reduce_to
value_destination_pairs)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/cross_device_ops.py", line 317, in batch_reduce
value_destination_pairs[0][0].values) == 1:
IndexError: list index out of range
32/60000 [..............................] - ETA: 10:32Exception raised:
list index out of range
|
IndexError
|
def __init__(self, layer: tf.keras.layers.Layer):
    """Build weight and MAC-operation profiles for a single Keras layer."""
    self._layer = layer
    weights = layer.weights
    if isinstance(layer, tf.keras.layers.BatchNormalization):
        # Batch norm (beta, moving_mean) and (gamma, moving_variance) pairs can
        # be fused; drop the first weight of each fused pair so its parameters
        # are not counted twice.
        fused_pairs = [("beta", "moving_mean"), ("gamma", "moving_variance")]
        for pair in fused_pairs:
            # Strip the scope prefix and the ":0" tensor suffix from each name.
            names = [w.name.split("/")[-1].replace(":0", "") for w in weights]
            if pair[0] in names and pair[1] in names:
                weights.pop(names.index(pair[0]))
    self.weight_profiles = [
        WeightProfile(
            weight,
            # Identity check (is, not ==): a weight is trainable iff the very
            # same object appears in layer.trainable_weights.
            trainable=any(weight is w for w in layer.trainable_weights),
        )
        for weight in weights
    ]
    self.op_profiles = []
    # The extra `and self.output_pixels` guard skips op counting when the layer
    # has no usable output shape -- presumably the fix for the
    # "object of type 'NoneType' has no len()" traceback below; confirm.
    if isinstance(layer, mac_containing_layers) and self.output_pixels:
        for p in self.weight_profiles:
            if not p.is_bias():
                self.op_profiles.append(
                    OperationProfile(
                        # One multiply-accumulate per weight per output pixel.
                        n=p.count * self.output_pixels,
                        precision=max(self.input_precision or 32, p.bitwidth),
                        op_type="mac",
                    )
                )
|
def __init__(self, layer: tf.keras.layers.Layer):
    """Build weight and MAC-operation profiles for a single Keras layer."""
    self._layer = layer
    weights = layer.weights
    if isinstance(layer, tf.keras.layers.BatchNormalization):
        # Batch norm (beta, moving_mean) and (gamma, moving_variance) pairs can
        # be fused; drop the first weight of each fused pair so its parameters
        # are not counted twice.
        fused_pairs = [("beta", "moving_mean"), ("gamma", "moving_variance")]
        for pair in fused_pairs:
            # Strip the scope prefix and the ":0" tensor suffix from each name.
            names = [w.name.split("/")[-1].replace(":0", "") for w in weights]
            if pair[0] in names and pair[1] in names:
                weights.pop(names.index(pair[0]))
    self.weight_profiles = [
        WeightProfile(
            weight,
            # Identity check (is, not ==): a weight is trainable iff the very
            # same object appears in layer.trainable_weights.
            trainable=any(weight is w for w in layer.trainable_weights),
        )
        for weight in weights
    ]
    self.op_profiles = []
    # NOTE(review): no guard on `self.output_pixels` here -- when the layer has
    # no defined output shape this fails with "object of type 'NoneType' has
    # no len()" (see the larq#479 traceback below).
    if isinstance(layer, mac_containing_layers):
        for p in self.weight_profiles:
            if not p.is_bias():
                self.op_profiles.append(
                    OperationProfile(
                        # One multiply-accumulate per weight per output pixel.
                        n=p.count * self.output_pixels,
                        precision=max(self.input_precision or 32, p.bitwidth),
                        op_type="mac",
                    )
                )
|
https://github.com/larq/larq/issues/479
|
Traceback (most recent call last):
File "C:/Users/User/PycharmProjects/BNN-Playground/summary_bug.py", line 68, in <module>
cli()
File "C:\Users\User\Anaconda3\lib\site-packages\click\core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "C:\Users\User\Anaconda3\lib\site-packages\click\core.py", line 782, in main
rv = self.invoke(ctx)
File "C:\Users\User\Anaconda3\lib\site-packages\click\core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "C:\Users\User\Anaconda3\lib\site-packages\click\core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "C:\Users\User\Anaconda3\lib\site-packages\click\core.py", line 610, in invoke
return callback(*args, **kwargs)
File "C:\Users\User\Anaconda3\lib\site-packages\zookeeper\core\task.py", line 59, in command
task_instance.run()
File "C:/Users/User/PycharmProjects/BNN-Playground/summary_bug.py", line 64, in run
larq.models.summary(model)
File "C:\Users\User\Anaconda3\lib\site-packages\larq\models.py", line 466, in summary
model_profile = ModelProfile(model)
File "C:\Users\User\Anaconda3\lib\site-packages\larq\models.py", line 261, in __init__
self.layer_profiles = [LayerProfile(l) for l in model.layers]
File "C:\Users\User\Anaconda3\lib\site-packages\larq\models.py", line 261, in <listcomp>
self.layer_profiles = [LayerProfile(l) for l in model.layers]
File "C:\Users\User\Anaconda3\lib\site-packages\larq\models.py", line 158, in __init__
n=p.count * self.output_pixels,
File "C:\Users\User\Anaconda3\lib\site-packages\larq\models.py", line 227, in output_pixels
if len(self.output_shape) == 4:
TypeError: object of type 'NoneType' has no len()
|
TypeError
|
def op_count(
    self, op_type: Optional[str] = None, precision: Optional[int] = None
) -> Optional[int]:
    """Return the number of `op_type` operations, optionally filtered by bit
    `precision`; None when the layer type is unsupported or has no output pixels.

    Raises:
        ValueError: for any `op_type` other than "mac" -- including the
            default None. NOTE(review): this makes the default value unusable;
            confirm callers always pass op_type="mac".
    """
    if op_type != "mac":
        raise ValueError("Currently only counting of MAC-operations is supported.")
    # `self.output_pixels` is falsy when the layer has no usable output shape;
    # return None then instead of counting.
    if isinstance(self._layer, op_count_supported_layer_types) and self.output_pixels:
        count = 0
        for op in self.op_profiles:
            # After the guard above, op_type is always "mac", so the
            # `op_type is None` half of this condition can never be True.
            if (precision is None or op.precision == precision) and (
                op_type is None or op.op_type == op_type
            ):
                count += op.n
        return count
    return None
|
def op_count(
    self, op_type: Optional[str] = None, precision: Optional[int] = None
) -> Optional[int]:
    """Return the number of `op_type` operations, optionally filtered by bit
    `precision`; None when the layer type is unsupported.

    Raises:
        ValueError: for any `op_type` other than "mac" -- including the
            default None. NOTE(review): this makes the default value unusable;
            confirm callers always pass op_type="mac".
    """
    if op_type != "mac":
        raise ValueError("Currently only counting of MAC-operations is supported.")
    # NOTE(review): unlike the guarded variant above, this version does not
    # check `self.output_pixels` before counting.
    if isinstance(self._layer, op_count_supported_layer_types):
        count = 0
        for op in self.op_profiles:
            # After the guard above, op_type is always "mac", so the
            # `op_type is None` half of this condition can never be True.
            if (precision is None or op.precision == precision) and (
                op_type is None or op.op_type == op_type
            ):
                count += op.n
        return count
    return None
|
https://github.com/larq/larq/issues/479
|
Traceback (most recent call last):
File "C:/Users/User/PycharmProjects/BNN-Playground/summary_bug.py", line 68, in <module>
cli()
File "C:\Users\User\Anaconda3\lib\site-packages\click\core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "C:\Users\User\Anaconda3\lib\site-packages\click\core.py", line 782, in main
rv = self.invoke(ctx)
File "C:\Users\User\Anaconda3\lib\site-packages\click\core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "C:\Users\User\Anaconda3\lib\site-packages\click\core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "C:\Users\User\Anaconda3\lib\site-packages\click\core.py", line 610, in invoke
return callback(*args, **kwargs)
File "C:\Users\User\Anaconda3\lib\site-packages\zookeeper\core\task.py", line 59, in command
task_instance.run()
File "C:/Users/User/PycharmProjects/BNN-Playground/summary_bug.py", line 64, in run
larq.models.summary(model)
File "C:\Users\User\Anaconda3\lib\site-packages\larq\models.py", line 466, in summary
model_profile = ModelProfile(model)
File "C:\Users\User\Anaconda3\lib\site-packages\larq\models.py", line 261, in __init__
self.layer_profiles = [LayerProfile(l) for l in model.layers]
File "C:\Users\User\Anaconda3\lib\site-packages\larq\models.py", line 261, in <listcomp>
self.layer_profiles = [LayerProfile(l) for l in model.layers]
File "C:\Users\User\Anaconda3\lib\site-packages\larq\models.py", line 158, in __init__
n=p.count * self.output_pixels,
File "C:\Users\User\Anaconda3\lib\site-packages\larq\models.py", line 227, in output_pixels
if len(self.output_shape) == 4:
TypeError: object of type 'NoneType' has no len()
|
TypeError
|
def output_pixels(self) -> Optional[int]:
    """Return the pixel count of one feature map (1 for dense layers).

    Returns ``None`` when no output shape is available. Raises
    ``NotImplementedError`` for ranks other than 2 or 4.
    """
    shape = self.output_shape
    if not shape:
        return None
    rank = len(shape)
    if rank == 4:
        # Spatial dimensions are (height, width) at positions 1 and 2.
        height, width = shape[1], shape[2]
        return int(height * width)
    if rank == 2:
        return 1
    raise NotImplementedError()
|
def output_pixels(self) -> int:
    """Pixel count of a single feature map (1 for fully connected layers).

    Raises ``NotImplementedError`` for output ranks other than 2 or 4.
    """
    rank = len(self.output_shape)
    if rank == 2:
        return 1
    if rank == 4:
        # Product of the spatial (height, width) dimensions.
        spatial = self.output_shape[1:3]
        return int(np.prod(spatial))
    raise NotImplementedError()
|
https://github.com/larq/larq/issues/479
|
Traceback (most recent call last):
File "C:/Users/User/PycharmProjects/BNN-Playground/summary_bug.py", line 68, in <module>
cli()
File "C:\Users\User\Anaconda3\lib\site-packages\click\core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "C:\Users\User\Anaconda3\lib\site-packages\click\core.py", line 782, in main
rv = self.invoke(ctx)
File "C:\Users\User\Anaconda3\lib\site-packages\click\core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "C:\Users\User\Anaconda3\lib\site-packages\click\core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "C:\Users\User\Anaconda3\lib\site-packages\click\core.py", line 610, in invoke
return callback(*args, **kwargs)
File "C:\Users\User\Anaconda3\lib\site-packages\zookeeper\core\task.py", line 59, in command
task_instance.run()
File "C:/Users/User/PycharmProjects/BNN-Playground/summary_bug.py", line 64, in run
larq.models.summary(model)
File "C:\Users\User\Anaconda3\lib\site-packages\larq\models.py", line 466, in summary
model_profile = ModelProfile(model)
File "C:\Users\User\Anaconda3\lib\site-packages\larq\models.py", line 261, in __init__
self.layer_profiles = [LayerProfile(l) for l in model.layers]
File "C:\Users\User\Anaconda3\lib\site-packages\larq\models.py", line 261, in <listcomp>
self.layer_profiles = [LayerProfile(l) for l in model.layers]
File "C:\Users\User\Anaconda3\lib\site-packages\larq\models.py", line 158, in __init__
n=p.count * self.output_pixels,
File "C:\Users\User\Anaconda3\lib\site-packages\larq\models.py", line 227, in output_pixels
if len(self.output_shape) == 4:
TypeError: object of type 'NoneType' has no len()
|
TypeError
|
def apply_gradients(self, grads_and_vars, name=None):
    """Apply Bop updates to binary variables and delegate the rest.

    Partitions ``grads_and_vars`` in a single pass (so a one-shot iterator is
    consumed only once), applies this optimizer to binary variables and
    ``self.fp_optimizer`` to all others, then groups both update ops into one
    training op.
    """
    partitions = {True: [], False: []}
    for pair in grads_and_vars:
        partitions[bool(self.is_binary(pair[1]))].append(pair)
    bin_train_op = super().apply_gradients(partitions[True], name=name)
    fp_train_op = self.fp_optimizer.apply_gradients(partitions[False], name=name)
    return tf.group(bin_train_op, fp_train_op, name="train_with_bop")
|
def apply_gradients(self, grads_and_vars, name=None):
    """Apply Bop to binary variables and `self.fp_optimizer` to the rest.

    NOTE(review): `grads_and_vars` is iterated twice (once per comprehension);
    if the caller passes a one-shot iterator the second comprehension sees an
    exhausted sequence and `fp_grads_and_vars` ends up empty -- plausibly the
    source of the empty-list IndexError under MirroredStrategy shown in the
    traceback below. Confirm against larq#286.
    """
    bin_grads_and_vars = [(g, v) for g, v in grads_and_vars if self.is_binary(v)]
    fp_grads_and_vars = [(g, v) for g, v in grads_and_vars if not self.is_binary(v)]
    bin_train_op = super().apply_gradients(bin_grads_and_vars, name=name)
    fp_train_op = self.fp_optimizer.apply_gradients(fp_grads_and_vars, name=name)
    return tf.group(bin_train_op, fp_train_op, name="train_with_bop")
|
https://github.com/larq/larq/issues/286
|
2019-10-11 13:45:47 UTC -- Epoch 1/150
2019-10-11 13:45:50 UTC -- Traceback (most recent call last):
2019-10-11 13:45:50 UTC -- File "/usr/local/bin/nf", line 11, in <module>
2019-10-11 13:45:50 UTC -- load_entry_point('project-final', 'console_scripts', 'nf')()
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/click/core.py", line 764, in __call__
2019-10-11 13:45:50 UTC -- return self.main(*args, **kwargs)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/click/core.py", line 717, in main
2019-10-11 13:45:50 UTC -- rv = self.invoke(ctx)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/click/core.py", line 1137, in invoke
2019-10-11 13:45:50 UTC -- return _process_result(sub_ctx.command.invoke(sub_ctx))
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/click/core.py", line 956, in invoke
2019-10-11 13:45:50 UTC -- return ctx.invoke(self.callback, **ctx.params)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/click/core.py", line 555, in invoke
2019-10-11 13:45:50 UTC -- return callback(*args, **kwargs)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/zookeeper/cli.py", line 114, in train
2019-10-11 13:45:50 UTC -- function(build_model, dataset, hparams, output_dir, **kwargs)
2019-10-11 13:45:50 UTC -- File "/code/project_final/train.py", line 110, in train
2019-10-11 13:45:50 UTC -- callbacks=callbacks,
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/training.py", line 728, in fit
2019-10-11 13:45:50 UTC -- use_multiprocessing=use_multiprocessing)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/training_v2.py", line 324, in fit
2019-10-11 13:45:50 UTC -- total_epochs=epochs)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/training_v2.py", line 123, in run_one_epoch
2019-10-11 13:45:50 UTC -- batch_outs = execution_function(iterator)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/training_v2_utils.py", line 86, in execution_function
2019-10-11 13:45:50 UTC -- distributed_function(input_fn))
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/eager/def_function.py", line 457, in __call__
2019-10-11 13:45:50 UTC -- result = self._call(*args, **kwds)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/eager/def_function.py", line 503, in _call
2019-10-11 13:45:50 UTC -- self._initialize(args, kwds, add_initializers_to=initializer_map)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/eager/def_function.py", line 408, in _initialize
2019-10-11 13:45:50 UTC -- *args, **kwds))
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/eager/function.py", line 1848, in _get_concrete_function_internal_garbage_collected
2019-10-11 13:45:50 UTC -- graph_function, _, _ = self._maybe_define_function(args, kwargs)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/eager/function.py", line 2150, in _maybe_define_function
2019-10-11 13:45:50 UTC -- graph_function = self._create_graph_function(args, kwargs)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/eager/function.py", line 2041, in _create_graph_function
2019-10-11 13:45:50 UTC -- capture_by_value=self._capture_by_value),
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/framework/func_graph.py", line 915, in func_graph_from_py_func
2019-10-11 13:45:50 UTC -- func_outputs = python_func(*func_args, **func_kwargs)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/eager/def_function.py", line 358, in wrapped_fn
2019-10-11 13:45:50 UTC -- return weak_wrapped_fn().__wrapped__(*args, **kwds)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/training_v2_utils.py", line 73, in distributed_function
2019-10-11 13:45:50 UTC -- per_replica_function, args=(model, x, y, sample_weights))
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/distribute_lib.py", line 760, in experimental_run_v2
2019-10-11 13:45:50 UTC -- return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/distribute_lib.py", line 1787, in call_for_each_replica
2019-10-11 13:45:50 UTC -- return self._call_for_each_replica(fn, args, kwargs)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/mirrored_strategy.py", line 661, in _call_for_each_replica
2019-10-11 13:45:50 UTC -- fn, args, kwargs)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/mirrored_strategy.py", line 196, in _call_for_each_replica
2019-10-11 13:45:50 UTC -- coord.join(threads)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/training/coordinator.py", line 389, in join
2019-10-11 13:45:50 UTC -- six.reraise(*self._exc_info_to_raise)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/six.py", line 693, in reraise
2019-10-11 13:45:50 UTC -- raise value
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/training/coordinator.py", line 297, in stop_on_exception
2019-10-11 13:45:50 UTC -- yield
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/mirrored_strategy.py", line 190, in _call_for_each_replica
2019-10-11 13:45:50 UTC -- **merge_kwargs)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/optimizer_v2/optimizer_v2.py", line 446, in _distributed_apply
2019-10-11 13:45:50 UTC -- ds_reduce_util.ReduceOp.SUM, grads_and_vars)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/distribute_lib.py", line 1481, in batch_reduce_to
2019-10-11 13:45:50 UTC -- return self._batch_reduce_to(reduce_op, value_destination_pairs)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/mirrored_strategy.py", line 707, in _batch_reduce_to
2019-10-11 13:45:50 UTC -- value_destination_pairs)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/cross_device_ops.py", line 317, in batch_reduce
2019-10-11 13:45:50 UTC -- value_destination_pairs[0][0].values) == 1:
2019-10-11 13:45:50 UTC -- IndexError: list index out of range
|
IndexError
|
def dense_passage_retrieval():
    """Train, save and evaluate a Dense Passage Retrieval (DPR) bi-encoder.

    End-to-end FARM example: builds query/passage tokenizers and a
    TextSimilarityProcessor, loads the NQ retriever data into a DataSilo,
    trains a BiAdaptiveModel (two BERT encoders + TextSimilarityHead), stores
    model and processor, then evaluates on the test split.
    """
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    # Metrics are tracked on the public deepset MLflow server.
    ml_logger = MLFlowLogger(tracking_uri="https://public-mlflow.deepset.ai/")
    ml_logger.init_experiment(
        experiment_name="FARM-dense_passage_retrieval", run_name="Run_dpr"
    )
    ##########################
    ########## Settings
    ##########################
    set_all_seeds(seed=42)
    batch_size = 4
    n_epochs = 3
    distributed = False  # enable for multi GPU training via DDP
    evaluate_every = 1000
    question_lang_model = "bert-base-uncased"
    passage_lang_model = "bert-base-uncased"
    do_lower_case = True
    use_fast = True
    embed_title = True
    num_hard_negatives = 1
    similarity_function = "dot_product"
    # data can be downloaded and unpacked into data_dir:
    # https://dl.fbaipublicfiles.com/dpr/data/retriever/biencoder-nq-train.json.gz
    # https://dl.fbaipublicfiles.com/dpr/data/retriever/biencoder-nq-dev.json.gz
    data_dir = "../data/retriever"
    train_filename = "biencoder-nq-train.json"
    dev_filename = "biencoder-nq-dev.json"
    test_filename = "biencoder-nq-dev.json"
    max_samples = None  # load a smaller dataset (e.g. for debugging)
    # For multi GPU Training via DDP we need to get the local rank
    args = parse_arguments()
    device, n_gpu = initialize_device_settings(
        use_cuda=True, local_rank=args.local_rank
    )
    # 1.Create question and passage tokenizers
    query_tokenizer = Tokenizer.load(
        pretrained_model_name_or_path=question_lang_model,
        do_lower_case=do_lower_case,
        use_fast=use_fast,
    )
    passage_tokenizer = Tokenizer.load(
        pretrained_model_name_or_path=passage_lang_model,
        do_lower_case=do_lower_case,
        use_fast=use_fast,
    )
    # 2. Create a DataProcessor that handles all the conversion from raw text into a pytorch Dataset
    # data_dir "data/retriever" should contain DPR training and dev files downloaded from https://github.com/facebookresearch/DPR
    # i.e., nq-train.json, nq-dev.json or trivia-train.json, trivia-dev.json
    label_list = ["hard_negative", "positive"]
    metric = "text_similarity_metric"
    processor = TextSimilarityProcessor(
        query_tokenizer=query_tokenizer,
        passage_tokenizer=passage_tokenizer,
        max_seq_len_query=64,
        max_seq_len_passage=256,
        label_list=label_list,
        metric=metric,
        data_dir=data_dir,
        train_filename=train_filename,
        dev_filename=dev_filename,
        test_filename=test_filename,
        embed_title=embed_title,
        num_hard_negatives=num_hard_negatives,
        max_samples=max_samples,
    )
    # 3. Create a DataSilo that loads several datasets (train/dev/test), provides DataLoaders for them and calculates a few descriptive statistics of our datasets
    # NOTE: In FARM, the dev set metrics differ from test set metrics in that they are calculated on a token level instead of a word level
    data_silo = DataSilo(
        processor=processor, batch_size=batch_size, distributed=distributed
    )
    # 4. Create an BiAdaptiveModel+
    # a) which consists of 2 pretrained language models as a basis
    question_language_model = LanguageModel.load(
        pretrained_model_name_or_path=question_lang_model,
        language_model_class="DPRQuestionEncoder",
    )
    passage_language_model = LanguageModel.load(
        pretrained_model_name_or_path=passage_lang_model,
        language_model_class="DPRContextEncoder",
    )
    # b) and a prediction head on top that is suited for our task => Question Answering
    prediction_head = TextSimilarityHead(similarity_function=similarity_function)
    model = BiAdaptiveModel(
        language_model1=question_language_model,
        language_model2=passage_language_model,
        prediction_heads=[prediction_head],
        embeds_dropout_prob=0.1,
        lm1_output_types=["per_sequence"],
        lm2_output_types=["per_sequence"],
        device=device,
    )
    # 5. Create an optimizer
    model, optimizer, lr_schedule = initialize_optimizer(
        model=model,
        learning_rate=1e-5,
        optimizer_opts={
            "name": "TransformersAdamW",
            "correct_bias": True,
            "weight_decay": 0.0,
            "eps": 1e-08,
        },
        schedule_opts={"name": "LinearWarmup", "num_warmup_steps": 100},
        n_batches=len(data_silo.loaders["train"]),
        n_epochs=n_epochs,
        grad_acc_steps=1,
        device=device,
        distributed=distributed,
    )
    # 6. Feed everything to the Trainer, which keeps care of growing our model and evaluates it from time to time
    trainer = Trainer(
        model=model,
        optimizer=optimizer,
        data_silo=data_silo,
        epochs=n_epochs,
        n_gpu=n_gpu,
        lr_schedule=lr_schedule,
        evaluate_every=evaluate_every,
        device=device,
    )
    # 7. Let it grow! Watch the tracked metrics live on the public mlflow server: https://public-mlflow.deepset.ai
    trainer.train()
    # 8. Hooray! You have a model. Store it:
    save_dir = Path("../saved_models/dpr-tutorial")
    model.save(save_dir)
    processor.save(save_dir)
    # 9. Evaluate
    test_data_loader = data_silo.get_data_loader("test")
    if test_data_loader is not None:
        evaluator_test = Evaluator(
            data_loader=test_data_loader, tasks=data_silo.processor.tasks, device=device
        )
        model.connect_heads_with_processor(processor.tasks)
        test_result = evaluator_test.eval(model)
|
def dense_passage_retrieval():
    """Train, save and evaluate a Dense Passage Retrieval (DPR) bi-encoder.

    End-to-end FARM example: builds query/passage tokenizers and a
    TextSimilarityProcessor, loads the NQ retriever data into a DataSilo,
    trains a BiAdaptiveModel (two encoders + TextSimilarityHead), stores model
    and processor, then evaluates on the test split.
    """
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    # Metrics are tracked on the public deepset MLflow server.
    ml_logger = MLFlowLogger(tracking_uri="https://public-mlflow.deepset.ai/")
    ml_logger.init_experiment(
        experiment_name="FARM-dense_passage_retrieval", run_name="Run_dpr"
    )
    ##########################
    ########## Settings
    ##########################
    set_all_seeds(seed=42)
    batch_size = 4
    n_epochs = 3
    distributed = False  # enable for multi GPU training via DDP
    evaluate_every = 1000
    question_lang_model = "facebook/dpr-question_encoder-single-nq-base"
    passage_lang_model = "facebook/dpr-ctx_encoder-single-nq-base"
    do_lower_case = True
    use_fast = True
    embed_title = True
    num_hard_negatives = 1
    similarity_function = "dot_product"
    train_filename = "nq-train.json"
    dev_filename = "nq-dev.json"
    test_filename = "nq-dev.json"
    max_samples = None  # load a smaller dataset (e.g. for debugging)
    # For multi GPU Training via DDP we need to get the local rank
    args = parse_arguments()
    device, n_gpu = initialize_device_settings(
        use_cuda=True, local_rank=args.local_rank
    )
    # 1.Create question and passage tokenizers
    query_tokenizer = Tokenizer.load(
        pretrained_model_name_or_path=question_lang_model,
        do_lower_case=do_lower_case,
        use_fast=use_fast,
    )
    passage_tokenizer = Tokenizer.load(
        pretrained_model_name_or_path=passage_lang_model,
        do_lower_case=do_lower_case,
        use_fast=use_fast,
    )
    # 2. Create a DataProcessor that handles all the conversion from raw text into a pytorch Dataset
    # data_dir "data/retriever" should contain DPR training and dev files downloaded from https://github.com/facebookresearch/DPR
    # i.e., nq-train.json, nq-dev.json or trivia-train.json, trivia-dev.json
    label_list = ["hard_negative", "positive"]
    metric = "text_similarity_metric"
    processor = TextSimilarityProcessor(
        query_tokenizer=query_tokenizer,
        passage_tokenizer=passage_tokenizer,
        max_seq_len_query=64,
        max_seq_len_passage=256,
        label_list=label_list,
        metric=metric,
        data_dir="../data/retriever",
        train_filename=train_filename,
        dev_filename=dev_filename,
        test_filename=test_filename,
        embed_title=embed_title,
        num_hard_negatives=num_hard_negatives,
        max_samples=max_samples,
    )
    # 3. Create a DataSilo that loads several datasets (train/dev/test), provides DataLoaders for them and calculates a few descriptive statistics of our datasets
    # NOTE: In FARM, the dev set metrics differ from test set metrics in that they are calculated on a token level instead of a word level
    data_silo = DataSilo(
        processor=processor, batch_size=batch_size, distributed=distributed
    )
    # 4. Create an BiAdaptiveModel+
    # a) which consists of 2 pretrained language models as a basis
    # NOTE(review): the tokenizers above load the facebook/dpr-* checkpoints
    # while the encoders below load hard-coded "bert-base-uncased" -- confirm
    # this tokenizer/model mismatch is intended (the variant above this record
    # uses the same name for both).
    question_language_model = LanguageModel.load(
        pretrained_model_name_or_path="bert-base-uncased",
        language_model_class="DPRQuestionEncoder",
    )
    passage_language_model = LanguageModel.load(
        pretrained_model_name_or_path="bert-base-uncased",
        language_model_class="DPRContextEncoder",
    )
    # b) and a prediction head on top that is suited for our task => Question Answering
    prediction_head = TextSimilarityHead(similarity_function=similarity_function)
    model = BiAdaptiveModel(
        language_model1=question_language_model,
        language_model2=passage_language_model,
        prediction_heads=[prediction_head],
        embeds_dropout_prob=0.1,
        lm1_output_types=["per_sequence"],
        lm2_output_types=["per_sequence"],
        device=device,
    )
    # 5. Create an optimizer
    model, optimizer, lr_schedule = initialize_optimizer(
        model=model,
        learning_rate=1e-5,
        optimizer_opts={
            "name": "TransformersAdamW",
            "correct_bias": True,
            "weight_decay": 0.0,
            "eps": 1e-08,
        },
        schedule_opts={"name": "LinearWarmup", "num_warmup_steps": 100},
        n_batches=len(data_silo.loaders["train"]),
        n_epochs=n_epochs,
        grad_acc_steps=1,
        device=device,
        distributed=distributed,
    )
    # 6. Feed everything to the Trainer, which keeps care of growing our model and evaluates it from time to time
    trainer = Trainer(
        model=model,
        optimizer=optimizer,
        data_silo=data_silo,
        epochs=n_epochs,
        n_gpu=n_gpu,
        lr_schedule=lr_schedule,
        evaluate_every=evaluate_every,
        device=device,
    )
    # 7. Let it grow! Watch the tracked metrics live on the public mlflow server: https://public-mlflow.deepset.ai
    trainer.train()
    # 8. Hooray! You have a model. Store it:
    save_dir = Path("../saved_models/dpr-tutorial")
    model.save(save_dir)
    processor.save(save_dir)
    # 9. Evaluate
    test_data_loader = data_silo.get_data_loader("test")
    if test_data_loader is not None:
        evaluator_test = Evaluator(
            data_loader=test_data_loader, tasks=data_silo.processor.tasks, device=device
        )
        model.connect_heads_with_processor(processor.tasks)
        test_result = evaluator_test.eval(model)
|
https://github.com/deepset-ai/FARM/issues/714
|
Traceback (most recent call last):
File "/home/ubuntu/pycharm/FARM/examples/dpr_encoder.py", line 155, in <module>
dense_passage_retrieval()
File "/home/ubuntu/pycharm/FARM/examples/dpr_encoder.py", line 91, in dense_passage_retrieval
data_silo = DataSilo(processor=processor, batch_size=batch_size, distributed=distributed, max_processes=128)
File "/home/ubuntu/pycharm/FARM/farm/data_handler/data_silo.py", line 113, in __init__
self._load_data()
File "/home/ubuntu/pycharm/FARM/farm/data_handler/data_silo.py", line 272, in _load_data
self._calculate_statistics()
File "/home/ubuntu/pycharm/FARM/farm/data_handler/data_silo.py", line 454, in _calculate_statistics
seq_lens.extend(np.sum(train_input_numpy != self.processor.tokenizer.pad_token_id, axis=1))
AttributeError: 'NoneType' object has no attribute 'pad_token_id'
|
AttributeError
|
def _calculate_statistics(self):
    """Calculate and log simple summary statistics of the datasets.

    Counts examples in the train/dev/test splits and, when training data is
    present, computes sequence-length statistics either for a single-encoder
    dataset ("input_ids" tensor) or for a bi-encoder dataset
    ("query_input_ids" + "passage_input_ids" tensors). Results are logged and
    pushed to MlLogger.
    """
    logger.info("")
    logger.info("DATASETS SUMMARY")
    logger.info("================")
    self.counts = {}
    # Defaults so the MlLogger.log_params() call at the end never hits a
    # NameError: previously `ave_len` and `clipped` were only assigned inside
    # the `if self.data["train"]` branch but read unconditionally below.
    clipped = -1
    ave_len = -1
    if self.data["train"]:
        self.counts["train"] = len(self.data["train"])
        if "input_ids" in self.tensor_names:
            clipped, ave_len, seq_lens, max_seq_len = (
                self._calc_length_stats_single_encoder()
            )
        elif (
            "query_input_ids" in self.tensor_names
            and "passage_input_ids" in self.tensor_names
        ):
            clipped, ave_len, seq_lens, max_seq_len = (
                self._calc_length_stats_biencoder()
            )
        else:
            logger.warning(
                f"Could not compute length statistics because 'input_ids' or 'query_input_ids' and 'passage_input_ids' are missing."
            )
            clipped = -1
            ave_len = -1
    else:
        self.counts["train"] = 0
    if self.data["dev"]:
        self.counts["dev"] = len(self.data["dev"])
    else:
        self.counts["dev"] = 0
    if self.data["test"]:
        self.counts["test"] = len(self.data["test"])
    else:
        self.counts["test"] = 0
    logger.info("Examples in train: {}".format(self.counts["train"]))
    logger.info("Examples in dev  : {}".format(self.counts["dev"]))
    logger.info("Examples in test : {}".format(self.counts["test"]))
    logger.info("")
    if self.data["train"]:
        if "input_ids" in self.tensor_names:
            logger.info(
                "Longest sequence length observed after clipping: {}".format(
                    max(seq_lens)
                )
            )
            logger.info("Average sequence length after clipping: {}".format(ave_len))
            logger.info("Proportion clipped: {}".format(clipped))
            if clipped > 0.5:
                logger.info(
                    "[Farmer's Tip] {}% of your samples got cut down to {} tokens. "
                    "Consider increasing max_seq_len. "
                    "This will lead to higher memory consumption but is likely to "
                    "improve your model performance".format(
                        round(clipped * 100, 1), max_seq_len
                    )
                )
        elif (
            "query_input_ids" in self.tensor_names
            and "passage_input_ids" in self.tensor_names
        ):
            # Bi-encoder stats come as (query, passage) pairs.
            logger.info(
                "Longest query length observed after clipping: {}   - for max_query_len: {}".format(
                    max(seq_lens[0]), max_seq_len[0]
                )
            )
            logger.info(
                "Average query length after clipping:          {}".format(ave_len[0])
            )
            logger.info(
                "Proportion queries clipped:                   {}".format(clipped[0])
            )
            logger.info("")
            logger.info(
                "Longest passage length observed after clipping: {}   - for max_passage_len: {}".format(
                    max(seq_lens[1]), max_seq_len[1]
                )
            )
            logger.info(
                "Average passage length after clipping:          {}".format(ave_len[1])
            )
            logger.info(
                "Proportion passages clipped:                    {}".format(clipped[1])
            )
    MlLogger.log_params(
        {
            "n_samples_train": self.counts["train"],
            "n_samples_dev": self.counts["dev"],
            "n_samples_test": self.counts["test"],
            "batch_size": self.batch_size,
            "ave_seq_len": ave_len,
            "clipped": clipped,
        }
    )
|
def _calculate_statistics(self):
    """Calculate and log simple summary statistics of the datasets.

    Computes per-split sample counts and, when the processor exposes a single
    shared tokenizer, training-set sequence-length statistics: average length
    and the proportion of samples clipped to ``max_seq_len``.
    Sets ``self.counts``, ``self.ave_len`` and ``self.clipped`` and logs
    everything via MlLogger.
    """
    logger.info("")
    logger.info("DATASETS SUMMARY")
    logger.info("================")
    self.counts = {}
    # Per-split sample counts; 0 when the split is missing or empty.
    self.counts["train"] = len(self.data["train"]) if self.data["train"] else 0
    self.counts["dev"] = len(self.data["dev"]) if self.data["dev"] else 0
    self.counts["test"] = len(self.data["test"]) if self.data["test"] else 0

    seq_lens = []
    max_seq_len = None
    # Bi-encoder processors (e.g. DPR) have no single `tokenizer` attribute
    # (it is None) — they carry separate query/passage tokenizers. Skip the
    # length statistics in that case instead of crashing with
    # "'NoneType' object has no attribute 'pad_token_id'".
    tokenizer = getattr(self.processor, "tokenizer", None)
    if self.data["train"] and tokenizer is not None:
        for dataset in self.data["train"].datasets:
            train_input_numpy = dataset[:][0].numpy()
            # Effective sample length = number of non-padding tokens.
            seq_lens.extend(
                np.sum(train_input_numpy != tokenizer.pad_token_id, axis=1)
            )
        max_seq_len = dataset[:][0].shape[1]
    # Proportion of samples that hit the clipping limit, and the mean length.
    self.clipped = np.mean(np.array(seq_lens) == max_seq_len) if seq_lens else 0
    self.ave_len = np.mean(seq_lens) if seq_lens else 0

    logger.info("Examples in train: {}".format(self.counts["train"]))
    logger.info("Examples in dev : {}".format(self.counts["dev"]))
    logger.info("Examples in test : {}".format(self.counts["test"]))
    logger.info("")
    # Only report length statistics when they could actually be computed.
    if self.data["train"] and seq_lens:
        logger.info(
            "Longest sequence length observed after clipping: {}".format(
                max(seq_lens)
            )
        )
        logger.info("Average sequence length after clipping: {}".format(self.ave_len))
        logger.info("Proportion clipped: {}".format(self.clipped))
        if self.clipped > 0.5:
            logger.info(
                "[Farmer's Tip] {}% of your samples got cut down to {} tokens. "
                "Consider increasing max_seq_len. "
                "This will lead to higher memory consumption but is likely to "
                "improve your model performance".format(
                    round(self.clipped * 100, 1), max_seq_len
                )
            )
    MlLogger.log_params(
        {
            "n_samples_train": self.counts["train"],
            "n_samples_dev": self.counts["dev"],
            "n_samples_test": self.counts["test"],
            "batch_size": self.batch_size,
            "ave_seq_len": self.ave_len,
            "clipped": self.clipped,
        }
    )
|
https://github.com/deepset-ai/FARM/issues/714
|
Traceback (most recent call last):
File "/home/ubuntu/pycharm/FARM/examples/dpr_encoder.py", line 155, in <module>
dense_passage_retrieval()
File "/home/ubuntu/pycharm/FARM/examples/dpr_encoder.py", line 91, in dense_passage_retrieval
data_silo = DataSilo(processor=processor, batch_size=batch_size, distributed=distributed, max_processes=128)
File "/home/ubuntu/pycharm/FARM/farm/data_handler/data_silo.py", line 113, in __init__
self._load_data()
File "/home/ubuntu/pycharm/FARM/farm/data_handler/data_silo.py", line 272, in _load_data
self._calculate_statistics()
File "/home/ubuntu/pycharm/FARM/farm/data_handler/data_silo.py", line 454, in _calculate_statistics
seq_lens.extend(np.sum(train_input_numpy != self.processor.tokenizer.pad_token_id, axis=1))
AttributeError: 'NoneType' object has no attribute 'pad_token_id'
|
AttributeError
|
def convert_features_to_dataset(features):
    """
    Converts a list of feature dictionaries (one for each sample) into a PyTorch Dataset.

    :param features: A list of dictionaries. Each dictionary corresponds to one sample. Its keys are the
                     names of the type of feature and its values are the features themselves.
    :Return: a Pytorch dataset and a list of tensor names.
    """
    # features can be an empty list in cases where down sampling occurs (e.g. Natural Questions downsamples instances of is_impossible)
    if len(features) == 0:
        return None, None
    tensor_names = list(features[0].keys())
    all_tensors = []
    for t_name in tensor_names:
        # Conversion of floats
        if t_name == "regression_label_ids":
            cur_tensor = torch.tensor(
                [sample[t_name] for sample in features], dtype=torch.float32
            )
        else:
            try:
                # Checking whether a non-integer will be silently converted to torch.long
                check = features[0][t_name]
                if isinstance(check, numbers.Number):
                    base = check
                # extract a base variable from a nested lists or tuples
                elif isinstance(check, list):
                    base = list(flatten_list(check))[0]
                # extract a base variable from numpy arrays
                else:
                    base = check.ravel()[0]
                if not np.issubdtype(type(base), np.integer):
                    logger.warning(
                        f"Problem during conversion to torch tensors:\n"
                        f"A non-integer value for feature '{t_name}' with a value of: "
                        f"'{base}' will be converted to a torch tensor of dtype long."
                    )
            # Deliberately best-effort: if the type inspection fails for any
            # reason, we still fall through to the default long conversion.
            # (Was a bare `except:`, which would also swallow KeyboardInterrupt.)
            except Exception:
                logger.warning(
                    f"Could not determine type for feature '{t_name}'. Converting now to a tensor of default type long."
                )
            # Convert all remaining python objects to torch long tensors
            cur_tensor = torch.tensor(
                [sample[t_name] for sample in features], dtype=torch.long
            )
        all_tensors.append(cur_tensor)
    dataset = TensorDataset(*all_tensors)
    return dataset, tensor_names
|
def convert_features_to_dataset(features):
    """
    Converts a list of feature dictionaries (one for each sample) into a PyTorch Dataset.

    :param features: A list of dictionaries. Each dictionary corresponds to one sample. Its keys are the
                     names of the type of feature and its values are the features themselves.
    :Return: a Pytorch dataset and a list of tensor names.
    """
    # features can be an empty list in cases where down sampling occurs (e.g. Natural Questions downsamples instances of is_impossible)
    if len(features) == 0:
        return None, None
    tensor_names = list(features[0].keys())
    all_tensors = []
    for t_name in tensor_names:
        # Conversion of floats
        if t_name == "regression_label_ids":
            cur_tensor = torch.tensor(
                [sample[t_name] for sample in features], dtype=torch.float32
            )
        else:
            try:
                # Checking whether a non-integer will be silently converted to torch.long
                check = features[0][t_name]
                if isinstance(check, numbers.Number):
                    base = check
                # extract a base variable from a nested lists or tuples.
                # NOTE: must test for `list`, not `Iterable` — numpy arrays
                # (and strings) are also Iterable and would wrongly be sent
                # through flatten_list instead of the ravel() branch below.
                elif isinstance(check, list):
                    base = list(flatten_list(check))[0]
                # extract a base variable from numpy arrays
                else:
                    base = check.ravel()[0]
                if not np.issubdtype(type(base), np.integer):
                    logger.warning(
                        f"Problem during conversion to torch tensors:\n"
                        f"A non-integer value for feature '{t_name}' with a value of: "
                        f"'{base}' will be converted to a torch tensor of dtype long."
                    )
            # Deliberately best-effort: if the type inspection fails for any
            # reason, we still fall through to the default long conversion.
            except Exception:
                logger.warning(
                    f"Could not determine type for feature '{t_name}'. Converting now to a tensor of default type long."
                )
            # Convert all remaining python objects to torch long tensors
            cur_tensor = torch.tensor(
                [sample[t_name] for sample in features], dtype=torch.long
            )
        all_tensors.append(cur_tensor)
    dataset = TensorDataset(*all_tensors)
    return dataset, tensor_names
|
https://github.com/deepset-ai/FARM/issues/714
|
Traceback (most recent call last):
File "/home/ubuntu/pycharm/FARM/examples/dpr_encoder.py", line 155, in <module>
dense_passage_retrieval()
File "/home/ubuntu/pycharm/FARM/examples/dpr_encoder.py", line 91, in dense_passage_retrieval
data_silo = DataSilo(processor=processor, batch_size=batch_size, distributed=distributed, max_processes=128)
File "/home/ubuntu/pycharm/FARM/farm/data_handler/data_silo.py", line 113, in __init__
self._load_data()
File "/home/ubuntu/pycharm/FARM/farm/data_handler/data_silo.py", line 272, in _load_data
self._calculate_statistics()
File "/home/ubuntu/pycharm/FARM/farm/data_handler/data_silo.py", line 454, in _calculate_statistics
seq_lens.extend(np.sum(train_input_numpy != self.processor.tokenizer.pad_token_id, axis=1))
AttributeError: 'NoneType' object has no attribute 'pad_token_id'
|
AttributeError
|
def train(self):
    """
    Perform the training procedure.

    The training is visualized by a progress bar. It counts the epochs in a zero based manner.
    For example, when you specify ``epochs=20`` it starts to count from 0 to 19.

    If trainer evaluates the model with a test set the result of the
    evaluation is stored in ``test_result``.

    :return: Returns the model after training. When you do ``early_stopping``
        with a ``save_dir`` the best model is loaded and returned.
    """
    # connect the prediction heads with the right output from processor
    self.model.connect_heads_with_processor(
        self.data_silo.processor.tasks, require_labels=True
    )
    # Check that the tokenizer(s) fits the language model(s).
    # Bi-encoder models expose a second language model (`language_model2`) and
    # use separate query/passage tokenizers; single-encoder models use one.
    if hasattr(self.model, "language_model2"):
        self.model.verify_vocab_size(
            vocab_size1=len(self.data_silo.processor.query_tokenizer),
            vocab_size2=len(self.data_silo.processor.passage_tokenizer),
        )
    else:
        self.model.verify_vocab_size(vocab_size=len(self.data_silo.processor.tokenizer))
    self.model.train()
    do_stopping = False
    evalnr = 0
    loss = 0
    # Non-None when resuming from a checkpoint: number of steps to skip.
    resume_from_step = self.from_step
    if self.local_rank in [0, -1]:
        logger.info(f"\n {GROWING_TREE}")
    for epoch in range(self.from_epoch, self.epochs):
        early_break = False
        self.from_epoch = epoch
        train_data_loader = self.data_silo.get_data_loader("train")
        progress_bar = tqdm(
            train_data_loader,
            disable=self.local_rank not in [0, -1] or self.disable_tqdm,
        )
        for step, batch in enumerate(progress_bar):
            # when resuming training from a checkpoint, we want to fast forward to the step of the checkpoint
            if resume_from_step and step <= resume_from_step:
                # TODO: Improve skipping for StreamingDataSilo
                # The seeds before and within the loop are currently needed, if you need full reproducibility
                # of runs with vs. without checkpointing using StreamingDataSilo. Reason: While skipping steps in StreamingDataSilo,
                # we update the state of the random number generator (e.g. due to masking words), which can impact the model behaviour (e.g. dropout)
                if step % 10000 == 0:
                    logger.info(f"Skipping {step} out of {resume_from_step} steps ...")
                if resume_from_step == step:
                    logger.info(f"Finished skipping {resume_from_step} steps ...")
                    resume_from_step = None
                else:
                    continue
            progress_bar.set_description(
                f"Train epoch {epoch}/{self.epochs - 1} (Cur. train loss: {loss:.4f})"
            )
            # Only for distributed training: we need to ensure that all ranks still have a batch left for training
            if self.local_rank != -1:
                if not self._all_ranks_have_data(has_data=1, step=step):
                    early_break = True
                    break
            # Move batch of samples to device
            batch = {key: batch[key].to(self.device) for key in batch}
            # Forward & backward pass through model
            logits = self.model.forward(**batch)
            per_sample_loss = self.model.logits_to_loss(
                logits=logits, global_step=self.global_step, **batch
            )
            loss = self.backward_propagate(per_sample_loss, step)
            # Perform evaluation (rank 0 / single-process only, every
            # `evaluate_every` global steps, never at step 0).
            if (
                self.evaluate_every != 0
                and self.global_step % self.evaluate_every == 0
                and self.global_step != 0
                and self.local_rank in [0, -1]
            ):
                # When using StreamingDataSilo, each evaluation creates a new instance of
                # dev_data_loader. In cases like training from scratch, this could cause
                # some variance across evaluators due to the randomness in word masking.
                dev_data_loader = self.data_silo.get_data_loader("dev")
                if dev_data_loader is not None:
                    evaluator_dev = Evaluator(
                        data_loader=dev_data_loader,
                        tasks=self.data_silo.processor.tasks,
                        device=self.device,
                        report=self.eval_report,
                    )
                    evalnr += 1
                    result = evaluator_dev.eval(self.model)
                    evaluator_dev.log_results(result, "Dev", self.global_step)
                    if self.early_stopping:
                        do_stopping, save_model, eval_value = (
                            self.early_stopping.check_stopping(result)
                        )
                        if save_model:
                            logger.info(
                                "Saving current best model to {}, eval={}".format(
                                    self.early_stopping.save_dir, eval_value
                                )
                            )
                            self.model.save(self.early_stopping.save_dir)
                            self.data_silo.processor.save(self.early_stopping.save_dir)
                        if do_stopping:
                            # log the stopping
                            logger.info(
                                "STOPPING EARLY AT EPOCH {}, STEP {}, EVALUATION {}".format(
                                    epoch, step, evalnr
                                )
                            )
            if do_stopping:
                break
            self.global_step += 1
            self.from_step = step + 1
            # save the current state as a checkpoint before exiting if a SIGTERM signal is received
            if self.sigterm_handler and self.sigterm_handler.kill_now:
                logger.info(
                    "Received a SIGTERM signal. Saving the current train state as a checkpoint ..."
                )
                if self.local_rank in [0, -1]:
                    self._save()
                torch.distributed.destroy_process_group()
                sys.exit(0)
            # save a checkpoint and continue train
            if self.checkpoint_every and step % self.checkpoint_every == 0:
                if self.local_rank in [0, -1]:
                    self._save()
                # Let other ranks wait until rank 0 has finished saving
                if self.local_rank != -1:
                    torch.distributed.barrier()
        if do_stopping:
            break
        # Only for distributed training: we need to ensure that all ranks still have a batch left for training
        if self.local_rank != -1 and not early_break:
            self._all_ranks_have_data(has_data=False)
    # With early stopping we want to restore the best model
    if self.early_stopping and self.early_stopping.save_dir:
        logger.info(
            "Restoring best model so far from {}".format(self.early_stopping.save_dir)
        )
        lm_name = self.model.language_model.name
        self.model = AdaptiveModel.load(
            self.early_stopping.save_dir, self.device, lm_name=lm_name
        )
        self.model.connect_heads_with_processor(
            self.data_silo.processor.tasks, require_labels=True
        )
    # Eval on test set
    if self.evaluator_test and self.local_rank in [0, -1]:
        test_data_loader = self.data_silo.get_data_loader("test")
        if test_data_loader is not None:
            evaluator_test = Evaluator(
                data_loader=test_data_loader,
                tasks=self.data_silo.processor.tasks,
                device=self.device,
            )
            self.test_result = evaluator_test.eval(self.model)
            evaluator_test.log_results(self.test_result, "Test", self.global_step)
    return self.model
|
def train(self):
    """
    Perform the training procedure.

    The training is visualized by a progress bar. It counts the epochs in a zero based manner.
    For example, when you specify ``epochs=20`` it starts to count from 0 to 19.

    If trainer evaluates the model with a test set the result of the
    evaluation is stored in ``test_result``.

    :return: Returns the model after training. When you do ``early_stopping``
        with a ``save_dir`` the best model is loaded and returned.
    """
    # connect the prediction heads with the right output from processor
    self.model.connect_heads_with_processor(
        self.data_silo.processor.tasks, require_labels=True
    )
    # Check that the tokenizer(s) fits the language model(s).
    # Bi-encoder models (e.g. DPR) expose a second language model and carry
    # separate query/passage tokenizers; their `processor.tokenizer` is None.
    # FIX: use `query_tokenizer` here — `len(processor.tokenizer)` raised a
    # TypeError/AttributeError for bi-encoder processors.
    if hasattr(self.model, "language_model2"):
        self.model.verify_vocab_size(
            vocab_size1=len(self.data_silo.processor.query_tokenizer),
            vocab_size2=len(self.data_silo.processor.passage_tokenizer),
        )
    else:
        self.model.verify_vocab_size(vocab_size=len(self.data_silo.processor.tokenizer))
    self.model.train()
    do_stopping = False
    evalnr = 0
    loss = 0
    # Non-None when resuming from a checkpoint: number of steps to skip.
    resume_from_step = self.from_step
    if self.local_rank in [0, -1]:
        logger.info(f"\n {GROWING_TREE}")
    for epoch in range(self.from_epoch, self.epochs):
        early_break = False
        self.from_epoch = epoch
        train_data_loader = self.data_silo.get_data_loader("train")
        progress_bar = tqdm(
            train_data_loader,
            disable=self.local_rank not in [0, -1] or self.disable_tqdm,
        )
        for step, batch in enumerate(progress_bar):
            # when resuming training from a checkpoint, we want to fast forward to the step of the checkpoint
            if resume_from_step and step <= resume_from_step:
                # TODO: Improve skipping for StreamingDataSilo
                # The seeds before and within the loop are currently needed, if you need full reproducibility
                # of runs with vs. without checkpointing using StreamingDataSilo. Reason: While skipping steps in StreamingDataSilo,
                # we update the state of the random number generator (e.g. due to masking words), which can impact the model behaviour (e.g. dropout)
                if step % 10000 == 0:
                    logger.info(f"Skipping {step} out of {resume_from_step} steps ...")
                if resume_from_step == step:
                    logger.info(f"Finished skipping {resume_from_step} steps ...")
                    resume_from_step = None
                else:
                    continue
            progress_bar.set_description(
                f"Train epoch {epoch}/{self.epochs - 1} (Cur. train loss: {loss:.4f})"
            )
            # Only for distributed training: we need to ensure that all ranks still have a batch left for training
            if self.local_rank != -1:
                if not self._all_ranks_have_data(has_data=1, step=step):
                    early_break = True
                    break
            # Move batch of samples to device
            batch = {key: batch[key].to(self.device) for key in batch}
            # Forward & backward pass through model
            logits = self.model.forward(**batch)
            per_sample_loss = self.model.logits_to_loss(
                logits=logits, global_step=self.global_step, **batch
            )
            loss = self.backward_propagate(per_sample_loss, step)
            # Perform evaluation (rank 0 / single-process only, every
            # `evaluate_every` global steps, never at step 0).
            if (
                self.evaluate_every != 0
                and self.global_step % self.evaluate_every == 0
                and self.global_step != 0
                and self.local_rank in [0, -1]
            ):
                # When using StreamingDataSilo, each evaluation creates a new instance of
                # dev_data_loader. In cases like training from scratch, this could cause
                # some variance across evaluators due to the randomness in word masking.
                dev_data_loader = self.data_silo.get_data_loader("dev")
                if dev_data_loader is not None:
                    evaluator_dev = Evaluator(
                        data_loader=dev_data_loader,
                        tasks=self.data_silo.processor.tasks,
                        device=self.device,
                        report=self.eval_report,
                    )
                    evalnr += 1
                    result = evaluator_dev.eval(self.model)
                    evaluator_dev.log_results(result, "Dev", self.global_step)
                    if self.early_stopping:
                        do_stopping, save_model, eval_value = (
                            self.early_stopping.check_stopping(result)
                        )
                        if save_model:
                            logger.info(
                                "Saving current best model to {}, eval={}".format(
                                    self.early_stopping.save_dir, eval_value
                                )
                            )
                            self.model.save(self.early_stopping.save_dir)
                            self.data_silo.processor.save(self.early_stopping.save_dir)
                        if do_stopping:
                            # log the stopping
                            logger.info(
                                "STOPPING EARLY AT EPOCH {}, STEP {}, EVALUATION {}".format(
                                    epoch, step, evalnr
                                )
                            )
            if do_stopping:
                break
            self.global_step += 1
            self.from_step = step + 1
            # save the current state as a checkpoint before exiting if a SIGTERM signal is received
            if self.sigterm_handler and self.sigterm_handler.kill_now:
                logger.info(
                    "Received a SIGTERM signal. Saving the current train state as a checkpoint ..."
                )
                if self.local_rank in [0, -1]:
                    self._save()
                torch.distributed.destroy_process_group()
                sys.exit(0)
            # save a checkpoint and continue train
            if self.checkpoint_every and step % self.checkpoint_every == 0:
                if self.local_rank in [0, -1]:
                    self._save()
                # Let other ranks wait until rank 0 has finished saving
                if self.local_rank != -1:
                    torch.distributed.barrier()
        if do_stopping:
            break
        # Only for distributed training: we need to ensure that all ranks still have a batch left for training
        if self.local_rank != -1 and not early_break:
            self._all_ranks_have_data(has_data=False)
    # With early stopping we want to restore the best model
    if self.early_stopping and self.early_stopping.save_dir:
        logger.info(
            "Restoring best model so far from {}".format(self.early_stopping.save_dir)
        )
        lm_name = self.model.language_model.name
        self.model = AdaptiveModel.load(
            self.early_stopping.save_dir, self.device, lm_name=lm_name
        )
        self.model.connect_heads_with_processor(
            self.data_silo.processor.tasks, require_labels=True
        )
    # Eval on test set
    if self.evaluator_test and self.local_rank in [0, -1]:
        test_data_loader = self.data_silo.get_data_loader("test")
        if test_data_loader is not None:
            evaluator_test = Evaluator(
                data_loader=test_data_loader,
                tasks=self.data_silo.processor.tasks,
                device=self.device,
            )
            self.test_result = evaluator_test.eval(self.model)
            evaluator_test.log_results(self.test_result, "Test", self.global_step)
    return self.model
|
https://github.com/deepset-ai/FARM/issues/714
|
Traceback (most recent call last):
File "/home/ubuntu/pycharm/FARM/examples/dpr_encoder.py", line 155, in <module>
dense_passage_retrieval()
File "/home/ubuntu/pycharm/FARM/examples/dpr_encoder.py", line 91, in dense_passage_retrieval
data_silo = DataSilo(processor=processor, batch_size=batch_size, distributed=distributed, max_processes=128)
File "/home/ubuntu/pycharm/FARM/farm/data_handler/data_silo.py", line 113, in __init__
self._load_data()
File "/home/ubuntu/pycharm/FARM/farm/data_handler/data_silo.py", line 272, in _load_data
self._calculate_statistics()
File "/home/ubuntu/pycharm/FARM/farm/data_handler/data_silo.py", line 454, in _calculate_statistics
seq_lens.extend(np.sum(train_input_numpy != self.processor.tokenizer.pad_token_id, axis=1))
AttributeError: 'NoneType' object has no attribute 'pad_token_id'
|
AttributeError
|
def load(cls, pretrained_model_name_or_path, language=None, **kwargs):
    """
    Load a pretrained model by supplying

    * the name of a remote model on s3 ("distilbert-base-german-cased" ...)
    * OR a local path of a model trained via transformers ("some_dir/huggingface_model")
    * OR a local path of a model trained via FARM ("some_dir/farm_model")

    :param pretrained_model_name_or_path: The path of the saved pretrained model or its name.
    :type pretrained_model_name_or_path: str
    """
    lm = cls()
    lm.name = kwargs["farm_lm_name"] if "farm_lm_name" in kwargs else pretrained_model_name_or_path
    # A FARM-style checkpoint is identified by the presence of its saved
    # language_model_config.json; anything else is treated as a plain
    # transformers checkpoint or remote model name.
    farm_lm_config = Path(pretrained_model_name_or_path) / "language_model_config.json"
    if os.path.exists(farm_lm_config):
        # FARM style
        config = DistilBertConfig.from_pretrained(farm_lm_config)
        farm_lm_model = Path(pretrained_model_name_or_path) / "language_model.bin"
        lm.model = DistilBertModel.from_pretrained(farm_lm_model, config=config, **kwargs)
        lm.language = lm.model.config.language
    else:
        # Pytorch-transformer Style
        lm.model = DistilBertModel.from_pretrained(str(pretrained_model_name_or_path), **kwargs)
        lm.language = cls._get_or_infer_language_from_name(language, pretrained_model_name_or_path)
    config = lm.model.config

    # DistilBERT does not provide a pooled_output by default. Therefore, we need to initialize an extra pooler.
    # The pooler takes the first hidden representation & feeds it to a dense layer of (hidden_dim x hidden_dim).
    # We don't want a dropout in the end of the pooler, since we do that already in the adaptive model before we
    # feed everything to the prediction head
    config.summary_last_dropout = 0
    config.summary_type = "first"
    config.summary_activation = "tanh"
    lm.pooler = SequenceSummary(config)
    lm.pooler.apply(lm.model._init_weights)
    return lm
|
def load(cls, pretrained_model_name_or_path, language=None, **kwargs):
    """
    Load a pretrained model by supplying

    * the name of a remote model on s3 ("distilbert-base-german-cased" ...)
    * OR a local path of a model trained via transformers ("some_dir/huggingface_model")
    * OR a local path of a model trained via FARM ("some_dir/farm_model")

    :param pretrained_model_name_or_path: The path of the saved pretrained model or its name.
    :type pretrained_model_name_or_path: str
    """
    distilbert = cls()
    if "farm_lm_name" in kwargs:
        distilbert.name = kwargs["farm_lm_name"]
    else:
        distilbert.name = pretrained_model_name_or_path
    # We need to differentiate between loading model using FARM format and Pytorch-Transformers format
    farm_lm_config = Path(pretrained_model_name_or_path) / "language_model_config.json"
    if os.path.exists(farm_lm_config):
        # FARM style
        # FIX: this is a DistilBert loader, so the saved config must be read
        # as a DistilBertConfig — previously AlbertConfig was used here, which
        # produces a config incompatible with DistilBertModel.
        config = DistilBertConfig.from_pretrained(farm_lm_config)
        farm_lm_model = Path(pretrained_model_name_or_path) / "language_model.bin"
        distilbert.model = DistilBertModel.from_pretrained(
            farm_lm_model, config=config, **kwargs
        )
        distilbert.language = distilbert.model.config.language
    else:
        # Pytorch-transformer Style
        distilbert.model = DistilBertModel.from_pretrained(
            str(pretrained_model_name_or_path), **kwargs
        )
        distilbert.language = cls._get_or_infer_language_from_name(
            language, pretrained_model_name_or_path
        )
    config = distilbert.model.config

    # DistilBERT does not provide a pooled_output by default. Therefore, we need to initialize an extra pooler.
    # The pooler takes the first hidden representation & feeds it to a dense layer of (hidden_dim x hidden_dim).
    # We don't want a dropout in the end of the pooler, since we do that already in the adaptive model before we
    # feed everything to the prediction head
    config.summary_last_dropout = 0
    config.summary_type = "first"
    config.summary_activation = "tanh"
    distilbert.pooler = SequenceSummary(config)
    distilbert.pooler.apply(distilbert.model._init_weights)
    return distilbert
|
https://github.com/deepset-ai/FARM/issues/553
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-83-b2a730b6ac24> in <module>
----> 1 convert_to_transformers()
<ipython-input-82-8ab35f02f804> in convert_to_transformers()
12
13 # convert to transformers
---> 14 transformer_model = model.convert_to_transformers()
15
16 # save it (note: transformers use str instead of Path objects)
555 setattr(transformers_model, transformers_model.base_model_prefix, self.language_model.model)
556 transformers_model.classifier.load_state_dict(
--> 557 self.prediction_heads[0].feed_forward.feed_forward[0].state_dict())
558 elif self.prediction_heads[0].model_type == "token_classification":
559 # add more info to config
1043 if len(error_msgs) > 0:
1044 raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
-> 1045 self.__class__.__name__, "\n\t".join(error_msgs)))
1046 return _IncompatibleKeys(missing_keys, unexpected_keys)
1047
RuntimeError: Error(s) in loading state_dict for Linear:
size mismatch for weight: copying a param with shape torch.Size([2, 768]) from checkpoint, the shape in current model is torch.Size([2, 4096]).
|
RuntimeError
|
def convert_to_transformers(self):
    """Convert this FARM AdaptiveModel into an equivalent transformers model.

    Supports models with a single prediction head whose feed-forward part is a
    one-layer NN (dims ``[LM_output_dim, num_classes]``). A two-head language
    modelling setup is also accepted, but only the MLM component is converted.

    :return: an initialized transformers model with the language-model and
        prediction-head weights transferred from this model.
    :raises ValueError: for unsupported head counts or layer dimensions.
    :raises NotImplementedError: for Roberta/XLMRoberta text classification
        and for unknown prediction-head model types.
    """
    if (
        len(self.prediction_heads) == 2
        and self.prediction_heads[0].model_type == "language_modelling"
    ):
        logger.warning(
            "Currently only the Masked Language Modeling component of the prediction head is converted, "
            "not the Next Sentence Prediction or Sentence Order Prediction components"
        )
    elif len(self.prediction_heads) != 1:
        raise ValueError(
            f"Currently conversion only works for models with a SINGLE prediction head. "
            f"Your model has {len(self.prediction_heads)}"
        )
    elif len(self.prediction_heads[0].layer_dims) != 2:
        raise ValueError(
            f"Currently conversion only works for PredictionHeads that are a single layer Feed Forward NN with dimensions [LM_output_dim, number_classes].\n"
            f" Your PredictionHead has {str(self.prediction_heads[0].layer_dims)} dimensions."
        )
    # TODO add more infos to config
    if self.prediction_heads[0].model_type == "span_classification":
        # init model
        transformers_model = AutoModelForQuestionAnswering.from_config(
            self.language_model.model.config
        )
        # transfer weights for language model + prediction head
        setattr(
            transformers_model,
            transformers_model.base_model_prefix,
            self.language_model.model,
        )
        transformers_model.qa_outputs.load_state_dict(
            self.prediction_heads[0].feed_forward.feed_forward[0].state_dict()
        )
    elif self.prediction_heads[0].model_type == "language_modelling":
        # init model
        transformers_model = AutoModelWithLMHead.from_config(
            self.language_model.model.config
        )
        # transfer weights for language model + prediction head
        setattr(
            transformers_model,
            transformers_model.base_model_prefix,
            self.language_model.model,
        )
        # Adding decoder bias (required for conversion to transformers)
        self.prediction_heads[0].decoder.bias = self.prediction_heads[0].bias
        # Rename FARM state_dict keys to the transformers MLM-head layout
        # (transformers nests dense/LayerNorm under a "transform" submodule).
        ph_state_dict = self.prediction_heads[0].state_dict()
        ph_state_dict["transform.dense.weight"] = ph_state_dict.pop("dense.weight")
        ph_state_dict["transform.dense.bias"] = ph_state_dict.pop("dense.bias")
        ph_state_dict["transform.LayerNorm.weight"] = ph_state_dict.pop(
            "LayerNorm.weight"
        )
        ph_state_dict["transform.LayerNorm.bias"] = ph_state_dict.pop("LayerNorm.bias")
        transformers_model.cls.predictions.load_state_dict(ph_state_dict)
    elif self.prediction_heads[0].model_type == "text_classification":
        if self.language_model.model.base_model_prefix == "roberta":
            # Classification Heads in transformers have different architecture across Language Model variants
            # The RobertaClassificationhead has components: input2dense, dropout, tanh, dense2output
            # The tanh function cannot be mapped to current FARM style linear Feed Forward ClassificationHeads.
            # So conversion for this type cannot work. We would need a compatible FARM RobertaClassificationHead
            logger.error(
                "Conversion for Text Classification with Roberta or XLMRoberta not possible at the moment."
            )
            raise NotImplementedError
        # add more info to config
        self.language_model.model.config.id2label = {
            id: label for id, label in enumerate(self.prediction_heads[0].label_list)
        }
        self.language_model.model.config.label2id = {
            label: id for id, label in enumerate(self.prediction_heads[0].label_list)
        }
        self.language_model.model.config.finetuning_task = "text_classification"
        self.language_model.model.config.language = self.language_model.language
        self.language_model.model.config.num_labels = self.prediction_heads[
            0
        ].num_labels
        # init model
        transformers_model = AutoModelForSequenceClassification.from_config(
            self.language_model.model.config
        )
        # transfer weights for language model + prediction head
        setattr(
            transformers_model,
            transformers_model.base_model_prefix,
            self.language_model.model,
        )
        transformers_model.classifier.load_state_dict(
            self.prediction_heads[0].feed_forward.feed_forward[0].state_dict()
        )
    elif self.prediction_heads[0].model_type == "token_classification":
        # add more info to config
        self.language_model.model.config.id2label = {
            id: label for id, label in enumerate(self.prediction_heads[0].label_list)
        }
        self.language_model.model.config.label2id = {
            label: id for id, label in enumerate(self.prediction_heads[0].label_list)
        }
        self.language_model.model.config.finetuning_task = "token_classification"
        self.language_model.model.config.language = self.language_model.language
        self.language_model.model.config.num_labels = self.prediction_heads[
            0
        ].num_labels
        # init model
        transformers_model = AutoModelForTokenClassification.from_config(
            self.language_model.model.config
        )
        # transfer weights for language model + prediction head
        setattr(
            transformers_model,
            transformers_model.base_model_prefix,
            self.language_model.model,
        )
        transformers_model.classifier.load_state_dict(
            self.prediction_heads[0].feed_forward.feed_forward[0].state_dict()
        )
    else:
        raise NotImplementedError(
            f"FARM -> Transformers conversion is not supported yet for"
            f" prediction heads of type {self.prediction_heads[0].model_type}"
        )
    pass
    return transformers_model
def convert_to_transformers(self):
        """
        Convert this FARM AdaptiveModel (language model + single prediction head)
        into an equivalent HuggingFace transformers model.

        Supported head ``model_type`` values: "span_classification",
        "language_modelling", "text_classification", "token_classification".

        :return: a transformers model (e.g. AutoModelForQuestionAnswering) with
                 this model's language-model and head weights transferred in.
        :raises ValueError: if the model has more than one prediction head, or the
                            head is not a single-layer feed-forward NN.
        :raises NotImplementedError: for unsupported head types, or for text
                                     classification with a Roberta-style head.
        """
        if len(self.prediction_heads) != 1:
            raise ValueError(
                f"Currently conversion only works for models with a SINGLE prediction head. "
                f"Your model has {len(self.prediction_heads)}"
            )
        elif len(self.prediction_heads[0].layer_dims) != 2:
            raise ValueError(
                f"Currently conversion only works for PredictionHeads that are a single layer Feed Forward NN with dimensions [LM_output_dim, number_classes].\n"
                f" Your PredictionHead has {str(self.prediction_heads[0].layer_dims)} dimensions."
            )
        # TODO add more infos to config
        if self.prediction_heads[0].model_type == "span_classification":
            # init model
            transformers_model = AutoModelForQuestionAnswering.from_config(
                self.language_model.model.config
            )
            # transfer weights for language model + prediction head
            setattr(
                transformers_model,
                transformers_model.base_model_prefix,
                self.language_model.model,
            )
            transformers_model.qa_outputs.load_state_dict(
                self.prediction_heads[0].feed_forward.feed_forward[0].state_dict()
            )
        elif self.prediction_heads[0].model_type == "language_modelling":
            # init model
            transformers_model = AutoModelWithLMHead.from_config(
                self.language_model.model.config
            )
            # transfer weights for language model + prediction head
            setattr(
                transformers_model,
                transformers_model.base_model_prefix,
                self.language_model.model,
            )
            # FARM stores the MLM layers flat; transformers nests the same
            # parameters under a "transform." prefix, so remap the keys here.
            ph_state_dict = self.prediction_heads[0].state_dict()
            ph_state_dict["transform.dense.weight"] = ph_state_dict.pop("dense.weight")
            ph_state_dict["transform.dense.bias"] = ph_state_dict.pop("dense.bias")
            ph_state_dict["transform.LayerNorm.weight"] = ph_state_dict.pop(
                "LayerNorm.weight"
            )
            ph_state_dict["transform.LayerNorm.bias"] = ph_state_dict.pop("LayerNorm.bias")
            transformers_model.cls.predictions.load_state_dict(ph_state_dict)
            logger.warning(
                "Currently only the Masked Language Modeling component of the prediction head is converted, "
                "not the Next Sentence Prediction or Sentence Order Prediction components"
            )
        elif self.prediction_heads[0].model_type == "text_classification":
            if self.language_model.model.base_model_prefix == "roberta":
                # Classification Heads in transformers have different architecture across Language Model variants
                # The RobertaClassificationhead has components: input2dense, dropout, tanh, dense2output
                # The tanh function cannot be mapped to current FARM style linear Feed Forward ClassificationHeads.
                # So conversion for this type cannot work. We would need a compatible FARM RobertaClassificationHead
                logger.error(
                    "Conversion for Text Classification with Roberta or XLMRoberta not possible at the moment."
                )
                raise NotImplementedError
            # add more info to config
            self.language_model.model.config.id2label = {
                id: label for id, label in enumerate(self.prediction_heads[0].label_list)
            }
            self.language_model.model.config.label2id = {
                label: id for id, label in enumerate(self.prediction_heads[0].label_list)
            }
            self.language_model.model.config.finetuning_task = "text_classification"
            self.language_model.model.config.language = self.language_model.language
            self.language_model.model.config.num_labels = self.prediction_heads[
                0
            ].num_labels
            # init model
            transformers_model = AutoModelForSequenceClassification.from_config(
                self.language_model.model.config
            )
            # transfer weights for language model + prediction head
            setattr(
                transformers_model,
                transformers_model.base_model_prefix,
                self.language_model.model,
            )
            transformers_model.classifier.load_state_dict(
                self.prediction_heads[0].feed_forward.feed_forward[0].state_dict()
            )
        elif self.prediction_heads[0].model_type == "token_classification":
            # add more info to config
            self.language_model.model.config.id2label = {
                id: label for id, label in enumerate(self.prediction_heads[0].label_list)
            }
            self.language_model.model.config.label2id = {
                label: id for id, label in enumerate(self.prediction_heads[0].label_list)
            }
            self.language_model.model.config.finetuning_task = "token_classification"
            self.language_model.model.config.language = self.language_model.language
            self.language_model.model.config.num_labels = self.prediction_heads[
                0
            ].num_labels
            # init model
            transformers_model = AutoModelForTokenClassification.from_config(
                self.language_model.model.config
            )
            # transfer weights for language model + prediction head
            setattr(
                transformers_model,
                transformers_model.base_model_prefix,
                self.language_model.model,
            )
            transformers_model.classifier.load_state_dict(
                self.prediction_heads[0].feed_forward.feed_forward[0].state_dict()
            )
        else:
            raise NotImplementedError(
                f"FARM -> Transformers conversion is not supported yet for"
                f" prediction heads of type {self.prediction_heads[0].model_type}"
            )
        # Removed a dead `pass` statement that preceded this return.
        return transformers_model
|
https://github.com/deepset-ai/FARM/issues/533
|
Traceback (most recent call last):
File "conversion_huggingface_models.py", line 88, in <module>
convert_to_transformers("./farm_saved_models/bert-english-lm",
File "conversion_huggingface_models.py", line 46, in convert_to_transformers
transformer_model = model.convert_to_transformers()
File "/home/himanshu/.conda/envs/tf2/lib/python3.8/site-packages/farm/modeling/adaptive_model.py", line 509, in convert_to_transformers
elif len(self.prediction_heads[0].layer_dims) != 2:
File "/home/himanshu/.conda/envs/tf2/lib/python3.8/site-packages/torch/nn/modules/module.py", line 771, in __getattr__
raise ModuleAttributeError("'{}' object has no attribute '{}'".format(
torch.nn.modules.module.ModuleAttributeError: 'BertLMHead' object has no attribute 'layer_dims'
|
torch.nn.modules.module.ModuleAttributeError
|
def __init__(
    self, hidden_size, vocab_size, hidden_act="gelu", task_name="lm", **kwargs
):
        """
        Masked-language-modeling prediction head (BERT style): a dense
        "transform" layer with activation + LayerNorm, followed by a
        vocabulary-sized "decoder" projection with a per-token bias.

        :param hidden_size: dimensionality of the language model's hidden states.
        :param vocab_size: size of the output vocabulary; also used as num_labels.
        :param hidden_act: key into ACT2FN selecting the transform activation.
        :param task_name: name under which this head's task is registered.
        :param kwargs: ignored; accepted for constructor-interface compatibility.
        """
        super(BertLMHead, self).__init__()
        self.hidden_size = hidden_size
        self.hidden_act = hidden_act
        self.vocab_size = vocab_size
        # Per-token loss; target positions equal to -1 are ignored.
        self.loss_fct = CrossEntropyLoss(reduction="none", ignore_index=-1)
        self.num_labels = vocab_size  # vocab size
        # Adding layer_dims (required for conversion to transformers)
        self.layer_dims = [hidden_size, vocab_size]
        # TODO Check if weight init needed!
        # self.apply(self.init_bert_weights)
        self.ph_output_type = "per_token"
        self.model_type = "language_modelling"
        self.task_name = task_name
        # NOTE: generate_config() runs after the metadata attributes above are
        # set — keep this ordering when editing.
        self.generate_config()
        # NN Layers
        # this is the "transform" module in the pytorch-transformers repo
        self.dense = nn.Linear(self.hidden_size, self.hidden_size)
        self.transform_act_fn = ACT2FN[self.hidden_act]
        self.LayerNorm = BertLayerNorm(self.hidden_size, eps=1e-12)
        # this is the "decoder" in the pytorch-transformers repo
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(hidden_size, vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(vocab_size))
|
def __init__(
    self, hidden_size, vocab_size, hidden_act="gelu", task_name="lm", **kwargs
):
        """
        Masked-language-modeling prediction head (BERT style): a dense
        "transform" layer with activation + LayerNorm, followed by a
        vocabulary-sized "decoder" projection with a per-token bias.

        :param hidden_size: dimensionality of the language model's hidden states.
        :param vocab_size: size of the output vocabulary; also used as num_labels.
        :param hidden_act: key into ACT2FN selecting the transform activation.
        :param task_name: name under which this head's task is registered.
        :param kwargs: ignored; accepted for constructor-interface compatibility.
        """
        super(BertLMHead, self).__init__()
        self.hidden_size = hidden_size
        self.hidden_act = hidden_act
        self.vocab_size = vocab_size
        # Per-token loss; target positions equal to -1 are ignored.
        self.loss_fct = CrossEntropyLoss(reduction="none", ignore_index=-1)
        self.num_labels = vocab_size  # vocab size
        # layer_dims is required by AdaptiveModel.convert_to_transformers(),
        # which checks len(layer_dims) == 2 for every prediction head; without
        # it conversion crashes with "'BertLMHead' object has no attribute
        # 'layer_dims'".
        self.layer_dims = [hidden_size, vocab_size]
        # TODO Check if weight init needed!
        # self.apply(self.init_bert_weights)
        self.ph_output_type = "per_token"
        self.model_type = "language_modelling"
        self.task_name = task_name
        # NOTE: generate_config() runs after the metadata attributes above are
        # set — keep this ordering when editing.
        self.generate_config()
        # NN Layers
        # this is the "transform" module in the pytorch-transformers repo
        self.dense = nn.Linear(self.hidden_size, self.hidden_size)
        self.transform_act_fn = ACT2FN[self.hidden_act]
        self.LayerNorm = BertLayerNorm(self.hidden_size, eps=1e-12)
        # this is the "decoder" in the pytorch-transformers repo
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(hidden_size, vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(vocab_size))
|
https://github.com/deepset-ai/FARM/issues/533
|
Traceback (most recent call last):
File "conversion_huggingface_models.py", line 88, in <module>
convert_to_transformers("./farm_saved_models/bert-english-lm",
File "conversion_huggingface_models.py", line 46, in convert_to_transformers
transformer_model = model.convert_to_transformers()
File "/home/himanshu/.conda/envs/tf2/lib/python3.8/site-packages/farm/modeling/adaptive_model.py", line 509, in convert_to_transformers
elif len(self.prediction_heads[0].layer_dims) != 2:
File "/home/himanshu/.conda/envs/tf2/lib/python3.8/site-packages/torch/nn/modules/module.py", line 771, in __getattr__
raise ModuleAttributeError("'{}' object has no attribute '{}'".format(
torch.nn.modules.module.ModuleAttributeError: 'BertLMHead' object has no attribute 'layer_dims'
|
torch.nn.modules.module.ModuleAttributeError
|
def question_answering():
    """
    End-to-end example: fine-tune and run a model on Natural Questions.

    Trains a RoBERTa-based AdaptiveModel with two prediction heads (extractive
    QA + answer-type classification) on NQ data, saves it, then downloads a
    pre-trained checkpoint from S3 and runs inference on a sample question.

    NOTE(review): requires network access (public MLflow server, model hub, S3
    download) and the NQ jsonl files under ../data/natural_questions, plus a
    GPU (use_cuda=True) — confirm environment before running.
    """
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    ml_logger = MLFlowLogger(tracking_uri="https://public-mlflow.deepset.ai/")
    ml_logger.init_experiment(
        experiment_name="Public_FARM", run_name="Run_natural_questions"
    )
    ##########################
    ########## Settings
    ##########################
    set_all_seeds(seed=42)
    device, n_gpu = initialize_device_settings(use_cuda=True)
    batch_size = 24
    n_epochs = 1
    evaluate_every = 500
    lang_model = "deepset/roberta-base-squad2"  # start with a model that can already extract answers
    do_lower_case = False  # roberta is a cased model
    train_filename = "train_medium.jsonl"
    dev_filename = "dev_medium.jsonl"
    keep_is_impossible = 0.15  # downsample negative examples after data conversion
    downsample_context_size = (
        300  # reduce length of wikipedia articles to relevant part around the answer
    )
    # 1. Create a tokenizer
    tokenizer = Tokenizer.load(
        pretrained_model_name_or_path=lang_model, do_lower_case=do_lower_case
    )
    # Add HTML tag tokens to the tokenizer vocabulary, so they do not get split apart
    html_tags = [
        "<Th>",
        "</Th>",
        "<Td>",
        "</Td>",
        "<Tr>",
        "</Tr>",
        "<Li>",
        "</Li>",
        "<P>",
        "</P>",
        "<Ul>",
        "</Ul>",
        "<H1>",
        "</H1>",
        "<H2>",
        "</H2>",
        "<H3>",
        "</H3>",
        "<H4>",
        "</H4>",
        "<H5>",
        "</H5>",
        "<Td_colspan=",
    ]
    tokenizer.add_tokens(html_tags)
    # 2. Create a DataProcessor that handles all the conversion from raw text into a pytorch Dataset
    processor = NaturalQuestionsProcessor(
        tokenizer=tokenizer,
        max_seq_len=384,
        train_filename=train_filename,
        dev_filename=dev_filename,
        keep_no_answer=keep_is_impossible,
        downsample_context_size=downsample_context_size,
        data_dir=Path("../data/natural_questions"),
    )
    # 3. Create a DataSilo that loads several datasets (train/dev/test), provides DataLoaders for them and calculates a few descriptive statistics of our datasets
    data_silo = DataSilo(processor=processor, batch_size=batch_size, caching=True)
    # 4. Create an AdaptiveModel
    # a) which consists of a pretrained language model as a basis
    language_model = LanguageModel.load(lang_model, n_added_tokens=len(html_tags))
    # b) and in case of Natural Questions we need two Prediction Heads
    # one for extractive Question Answering
    qa_head = QuestionAnsweringHead()
    # another one for answering yes/no questions or deciding if the given text passage might contain an answer
    classification_head = TextClassificationHead(
        num_labels=len(processor.answer_type_list)
    )  # answer_type_list = ["is_impossible", "span", "yes", "no"]
    model = AdaptiveModel(
        language_model=language_model,
        prediction_heads=[qa_head, classification_head],
        embeds_dropout_prob=0.1,
        lm_output_types=["per_token", "per_sequence"],
        device=device,
    )
    # 5. Create an optimizer
    model, optimizer, lr_schedule = initialize_optimizer(
        model=model,
        learning_rate=3e-5,
        schedule_opts={"name": "LinearWarmup", "warmup_proportion": 0.2},
        n_batches=len(data_silo.loaders["train"]),
        n_epochs=n_epochs,
        device=device,
    )
    # 6. Feed everything to the Trainer, which takes care of training the model and evaluates it from time to time
    trainer = Trainer(
        model=model,
        optimizer=optimizer,
        data_silo=data_silo,
        epochs=n_epochs,
        n_gpu=n_gpu,
        lr_schedule=lr_schedule,
        evaluate_every=evaluate_every,
        device=device,
    )
    # 7. Train. Watch the tracked metrics live on the public mlflow server: https://public-mlflow.deepset.ai
    trainer.train()
    # 8. Store the trained model:
    save_dir = Path("../saved_models/roberta-base-squad2-nq")
    model.save(save_dir)
    processor.save(save_dir)
    # 9. Since training on the whole NQ corpus requires substantial compute resources we trained and uploaded a model on s3
    fetch_archive_from_http(
        "https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-qa/models/roberta-base-squad2-nq.zip",
        output_dir="../saved_models/farm",
    )
    QA_input = [
        {
            "qas": [
                "Did GameTrailers rated Twilight Princess as one of the best games ever created?"
            ],
            "context": "Twilight Princess was released to universal critical acclaim and commercial success. It received perfect scores from major publications such as 1UP.com, Computer and Video Games, Electronic Gaming Monthly, Game Informer, GamesRadar, and GameSpy. On the review aggregators GameRankings and Metacritic, Twilight Princess has average scores of 95% and 95 for the Wii version and scores of 95% and 96 for the GameCube version. GameTrailers in their review called it one of the greatest games ever created.",
        }
    ]
    model = QAInferencer.load(
        model_name_or_path="../saved_models/farm/roberta-base-squad2-nq",
        batch_size=batch_size,
        gpu=True,
    )
    result = model.inference_from_dicts(
        dicts=QA_input, return_json=False
    )  # result is a list of QAPred objects
    print(
        f"\nQuestion: Did GameTrailers rated Twilight Princess as one of the best games ever created?"
        f"\nAnswer from model: {result[0].prediction[0].answer}"
    )
    # Release the worker pool used by inference_from_dicts.
    model.close_multiprocessing_pool()
|
def question_answering():
    """
    End-to-end example: fine-tune and run a model on Natural Questions.

    Trains a RoBERTa-based AdaptiveModel with two prediction heads (extractive
    QA + answer-type classification) on NQ data, saves it, then downloads a
    pre-trained checkpoint from S3 and runs inference on a sample question.

    NOTE(review): requires network access (public MLflow server, model hub, S3
    download) and the NQ jsonl files under ../data/natural_questions, plus a
    GPU (use_cuda=True) — confirm environment before running.
    """
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    ml_logger = MLFlowLogger(tracking_uri="https://public-mlflow.deepset.ai/")
    ml_logger.init_experiment(
        experiment_name="Public_FARM", run_name="Run_natural_questions"
    )
    ##########################
    ########## Settings
    ##########################
    set_all_seeds(seed=42)
    device, n_gpu = initialize_device_settings(use_cuda=True)
    batch_size = 24
    n_epochs = 1
    evaluate_every = 500
    lang_model = "deepset/roberta-base-squad2"  # start with a model that can already extract answers
    do_lower_case = False  # roberta is a cased model
    train_filename = "train_medium.jsonl"
    dev_filename = "dev_medium.jsonl"
    keep_is_impossible = 0.15  # downsample negative examples after data conversion
    downsample_context_size = (
        300  # reduce length of wikipedia articles to relevant part around the answer
    )
    # 1. Create a tokenizer
    tokenizer = Tokenizer.load(
        pretrained_model_name_or_path=lang_model, do_lower_case=do_lower_case
    )
    # Add HTML tag tokens to the tokenizer vocabulary, so they do not get split apart
    html_tags = [
        "<Th>",
        "</Th>",
        "<Td>",
        "</Td>",
        "<Tr>",
        "</Tr>",
        "<Li>",
        "</Li>",
        "<P>",
        "</P>",
        "<Ul>",
        "</Ul>",
        "<H1>",
        "</H1>",
        "<H2>",
        "</H2>",
        "<H3>",
        "</H3>",
        "<H4>",
        "</H4>",
        "<H5>",
        "</H5>",
        "<Td_colspan=",
    ]
    tokenizer.add_tokens(html_tags)
    # 2. Create a DataProcessor that handles all the conversion from raw text into a pytorch Dataset
    processor = NaturalQuestionsProcessor(
        tokenizer=tokenizer,
        max_seq_len=384,
        train_filename=train_filename,
        dev_filename=dev_filename,
        keep_no_answer=keep_is_impossible,
        downsample_context_size=downsample_context_size,
        data_dir=Path("../data/natural_questions"),
    )
    # 3. Create a DataSilo that loads several datasets (train/dev/test), provides DataLoaders for them and calculates a few descriptive statistics of our datasets
    data_silo = DataSilo(processor=processor, batch_size=batch_size, caching=True)
    # 4. Create an AdaptiveModel
    # a) which consists of a pretrained language model as a basis
    language_model = LanguageModel.load(lang_model, n_added_tokens=len(html_tags))
    # b) and in case of Natural Questions we need two Prediction Heads
    # one for extractive Question Answering
    qa_head = QuestionAnsweringHead()
    # another one for answering yes/no questions or deciding if the given text passage might contain an answer
    classification_head = TextClassificationHead(
        num_labels=len(processor.answer_type_list)
    )  # answer_type_list = ["is_impossible", "span", "yes", "no"]
    model = AdaptiveModel(
        language_model=language_model,
        prediction_heads=[qa_head, classification_head],
        embeds_dropout_prob=0.1,
        lm_output_types=["per_token", "per_sequence"],
        device=device,
    )
    # 5. Create an optimizer
    model, optimizer, lr_schedule = initialize_optimizer(
        model=model,
        learning_rate=3e-5,
        schedule_opts={"name": "LinearWarmup", "warmup_proportion": 0.2},
        n_batches=len(data_silo.loaders["train"]),
        n_epochs=n_epochs,
        device=device,
    )
    # 6. Feed everything to the Trainer, which takes care of training the model and evaluates it from time to time
    trainer = Trainer(
        model=model,
        optimizer=optimizer,
        data_silo=data_silo,
        epochs=n_epochs,
        n_gpu=n_gpu,
        lr_schedule=lr_schedule,
        evaluate_every=evaluate_every,
        device=device,
    )
    # 7. Train. Watch the tracked metrics live on the public mlflow server: https://public-mlflow.deepset.ai
    trainer.train()
    # 8. Store the trained model:
    save_dir = Path("../saved_models/roberta-base-squad2-nq")
    model.save(save_dir)
    processor.save(save_dir)
    # 9. Since training on the whole NQ corpus requires substantial compute resources we trained and uploaded a model on s3
    fetch_archive_from_http(
        "https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-qa/models/roberta-base-squad2-nq.zip",
        output_dir="../saved_models/farm",
    )
    QA_input = [
        {
            "qas": [
                "Did GameTrailers rated Twilight Princess as one of the best games ever created?"
            ],
            "context": "Twilight Princess was released to universal critical acclaim and commercial success. It received perfect scores from major publications such as 1UP.com, Computer and Video Games, Electronic Gaming Monthly, Game Informer, GamesRadar, and GameSpy. On the review aggregators GameRankings and Metacritic, Twilight Princess has average scores of 95% and 95 for the Wii version and scores of 95% and 96 for the GameCube version. GameTrailers in their review called it one of the greatest games ever created.",
        }
    ]
    model = QAInferencer.load(
        model_name_or_path="../saved_models/farm/roberta-base-squad2-nq",
        batch_size=batch_size,
        gpu=True,
    )
    result = model.inference_from_dicts(
        dicts=QA_input, return_json=False
    )  # result is a list of QAPred objects
    print(
        f"\nQuestion: Did GameTrailers rated Twilight Princess as one of the best games ever created?"
        f"\nAnswer from model: {result[0].prediction[0].answer}"
    )
    # BUGFIX: was `close_multiprcessing_pool` (typo) which raised AttributeError;
    # release the worker pool used by inference_from_dicts.
    model.close_multiprocessing_pool()
|
https://github.com/deepset-ai/FARM/issues/520
|
"""
Traceback (most recent call last):
File "/usr/lib/python3.8/multiprocessing/pool.py", line 125, in worker
result = (True, func(*args, **kwds))
File "/home/fabio/src/git_repositories/FARM/farm/infer.py", line 569, in _create_datasets_chunkwise
dataset, tensor_names, baskets = processor.dataset_from_dicts(dicts, indices, return_baskets=True)
File "/home/fabio/src/git_repositories/FARM/farm/data_handler/processor.py", line 361, in dataset_from_dicts
id_external = self._id_from_dict(d)
File "/home/fabio/src/git_repositories/FARM/farm/data_handler/processor.py", line 403, in _id_from_dict
ext_id = try_get(ID_NAMES, d["qas"][0])
File "/home/fabio/src/git_repositories/FARM/farm/utils.py", line 432, in try_get
ret = dictionary[key]
TypeError: string indices must be integers
"""
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/fabio/src/git_repositories/FARM/examples/natural_questions.py", line 142, in <module>
question_answering()
File "/home/fabio/src/git_repositories/FARM/examples/natural_questions.py", line 135, in question_answering
result = model.inference_from_dicts(dicts=QA_input, return_json=False) # result is a list of QAPred objects
File "/home/fabio/src/git_repositories/FARM/farm/infer.py", line 696, in inference_from_dicts
return Inferencer.inference_from_dicts(self, dicts, return_json=return_json,
File "/home/fabio/src/git_repositories/FARM/farm/infer.py", line 474, in inference_from_dicts
return list(predictions)
File "/home/fabio/src/git_repositories/FARM/farm/infer.py", line 545, in _inference_with_multiprocessing
for dataset, tensor_names, baskets in results:
File "/usr/lib/python3.8/multiprocessing/pool.py", line 865, in next
raise value
TypeError: string indices must be integers
|
TypeError
|
def try_get(keys, dictionary):
    """Return the value of the first key from ``keys`` found in ``dictionary``.

    List values are unwrapped to their first element. Any lookup failure
    (e.g. ``dictionary`` is not actually a dict) is logged and swallowed;
    ``None`` is returned when nothing matches or an error occurs.
    """
    try:
        # Lazily yield the value of each present key; we only consume the first.
        hits = (dictionary[candidate] for candidate in keys if candidate in dictionary)
        for value in hits:
            if type(value) == list:
                value = value[0]
            return value
    except Exception as e:
        logger.warning(f"Cannot extract from dict {dictionary} with error: {e}")
    return None
|
def try_get(keys, dictionary):
    """Return the value of the first key from ``keys`` found in ``dictionary``.

    List values are unwrapped to their first element. Returns ``None`` when no
    key matches or when the lookup fails.
    """
    try:
        for key in keys:
            if key in dictionary:
                ret = dictionary[key]
                if type(ret) == list:
                    ret = ret[0]
                return ret
    except Exception as e:
        # BUGFIX: malformed input (e.g. `dictionary` being a plain string)
        # previously crashed inference with "TypeError: string indices must be
        # integers"; best-effort extraction must log and fall through to None.
        logger.warning(f"Cannot extract from dict {dictionary} with error: {e}")
    return None
|
https://github.com/deepset-ai/FARM/issues/520
|
"""
Traceback (most recent call last):
File "/usr/lib/python3.8/multiprocessing/pool.py", line 125, in worker
result = (True, func(*args, **kwds))
File "/home/fabio/src/git_repositories/FARM/farm/infer.py", line 569, in _create_datasets_chunkwise
dataset, tensor_names, baskets = processor.dataset_from_dicts(dicts, indices, return_baskets=True)
File "/home/fabio/src/git_repositories/FARM/farm/data_handler/processor.py", line 361, in dataset_from_dicts
id_external = self._id_from_dict(d)
File "/home/fabio/src/git_repositories/FARM/farm/data_handler/processor.py", line 403, in _id_from_dict
ext_id = try_get(ID_NAMES, d["qas"][0])
File "/home/fabio/src/git_repositories/FARM/farm/utils.py", line 432, in try_get
ret = dictionary[key]
TypeError: string indices must be integers
"""
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/fabio/src/git_repositories/FARM/examples/natural_questions.py", line 142, in <module>
question_answering()
File "/home/fabio/src/git_repositories/FARM/examples/natural_questions.py", line 135, in question_answering
result = model.inference_from_dicts(dicts=QA_input, return_json=False) # result is a list of QAPred objects
File "/home/fabio/src/git_repositories/FARM/farm/infer.py", line 696, in inference_from_dicts
return Inferencer.inference_from_dicts(self, dicts, return_json=return_json,
File "/home/fabio/src/git_repositories/FARM/farm/infer.py", line 474, in inference_from_dicts
return list(predictions)
File "/home/fabio/src/git_repositories/FARM/farm/infer.py", line 545, in _inference_with_multiprocessing
for dataset, tensor_names, baskets in results:
File "/usr/lib/python3.8/multiprocessing/pool.py", line 865, in next
raise value
TypeError: string indices must be integers
|
TypeError
|
def split_file(
    filepath, output_dir, docs_per_file=1_000, delimiter="", encoding="utf-8"
):
    """
    Split a text file of documents (separated by ``delimiter`` lines) into
    multiple ``part_<n>`` files containing ``docs_per_file`` documents each.

    :param filepath: path of the input file.
    :param output_dir: directory (Path) where the part files are written.
    :param docs_per_file: number of documents per output file.
    :param delimiter: stripped line content that marks a document boundary.
    :param encoding: encoding used for both reading and writing.
    """
    # Count lines up-front so tqdm can show progress; use `with` so the
    # counting handle is closed again (it previously leaked).
    with open(filepath, encoding=encoding) as count_file:
        total_lines = sum(1 for line in count_file)
    output_file_number = 1
    doc_count = 0
    lines_to_write = []
    with ExitStack() as stack:
        input_file = stack.enter_context(open(filepath, "r", encoding=encoding))
        for line_num, line in enumerate(
            tqdm(input_file, desc="Splitting file ...", total=total_lines)
        ):
            lines_to_write.append(line)
            if line.strip() == delimiter:
                doc_count += 1
                if doc_count % docs_per_file == 0:
                    filename = output_dir / f"part_{output_file_number}"
                    os.makedirs(os.path.dirname(filename), exist_ok=True)
                    write_file = stack.enter_context(
                        open(
                            filename,
                            "w+",
                            encoding=encoding,
                            buffering=10 * 1024 * 1024,
                        )
                    )
                    write_file.writelines(lines_to_write)
                    write_file.close()
                    output_file_number += 1
                    lines_to_write = []
        # Flush any trailing document not followed by a final delimiter.
        if lines_to_write:
            filename = output_dir / f"part_{output_file_number}"
            os.makedirs(os.path.dirname(filename), exist_ok=True)
            write_file = stack.enter_context(
                open(filename, "w+", encoding=encoding, buffering=10 * 1024 * 1024)
            )
            write_file.writelines(lines_to_write)
            write_file.close()
    logger.info(
        f"The input file {filepath} is split in {output_file_number} parts at {output_dir}."
    )
|
def split_file(
    filepath, output_dir, docs_per_file=1_000, delimiter="", encoding="utf-8"
):
    """
    Split a text file of documents (separated by ``delimiter`` lines) into
    multiple ``part_<n>`` files containing ``docs_per_file`` documents each.

    :param filepath: path of the input file.
    :param output_dir: directory (Path) where the part files are written.
    :param docs_per_file: number of documents per output file.
    :param delimiter: stripped line content that marks a document boundary.
    :param encoding: encoding used for both reading and writing.
    """
    # Count lines up-front so tqdm can show progress; use `with` so the
    # counting handle is closed again (it previously leaked).
    with open(filepath, encoding=encoding) as count_file:
        total_lines = sum(1 for line in count_file)
    output_file_number = 1
    doc_count = 0
    lines_to_write = []
    with ExitStack() as stack:
        input_file = stack.enter_context(open(filepath, "r", encoding=encoding))
        for line_num, line in enumerate(
            tqdm(input_file, desc="Splitting file ...", total=total_lines)
        ):
            lines_to_write.append(line)
            if line.strip() == delimiter:
                doc_count += 1
                if doc_count % docs_per_file == 0:
                    filename = output_dir / f"part_{output_file_number}"
                    os.makedirs(os.path.dirname(filename), exist_ok=True)
                    # BUGFIX: pass `encoding` to the write handle as well; with
                    # the platform default (e.g. ASCII under a C locale) writing
                    # non-ASCII text crashed with UnicodeEncodeError.
                    write_file = stack.enter_context(
                        open(
                            filename,
                            "w+",
                            encoding=encoding,
                            buffering=10 * 1024 * 1024,
                        )
                    )
                    write_file.writelines(lines_to_write)
                    write_file.close()
                    output_file_number += 1
                    lines_to_write = []
        # Flush any trailing document not followed by a final delimiter.
        if lines_to_write:
            filename = output_dir / f"part_{output_file_number}"
            os.makedirs(os.path.dirname(filename), exist_ok=True)
            write_file = stack.enter_context(
                open(filename, "w+", encoding=encoding, buffering=10 * 1024 * 1024)
            )
            write_file.writelines(lines_to_write)
            write_file.close()
    logger.info(
        f"The input file {filepath} is split in {output_file_number} parts at {output_dir}."
    )
|
https://github.com/deepset-ai/FARM/issues/462
|
Splitting file ...: 5%|5 | 127877/2407713 [00:00<00:02, 869200.61it/s]
Traceback (most recent call last):
File "finetune_lm.py", line 43, in <module>
split_file(data_dir / "train.txt", output_dir=Path('/data/german_old_texts/processed/lm/split_files'), docs_per_file=20)
File "/home/user/farm/data_handler/utils.py", line 785, in split_file
write_file.writelines(lines_to_write)
UnicodeEncodeError: 'ascii' codec can't encode character '\xe4' in position 62: ordinal not in range(128)
|
UnicodeEncodeError
|
def load(
    cls,
    model_name_or_path,
    batch_size=4,
    gpu=False,
    task_type=None,
    return_class_probs=False,
    strict=True,
    max_seq_len=256,
    doc_stride=128,
    extraction_layer=None,
    extraction_strategy=None,
):
        """
        Load an Inferencer incl. all relevant components (model, tokenizer, processor ...) either by
        1. specifying a public name from transformers' model hub (https://huggingface.co/models)
        2. or pointing to a local directory it is saved in.
        :param model_name_or_path: Local directory or public name of the model to load.
        :type model_name_or_path: str
        :param batch_size: Number of samples computed once per batch
        :type batch_size: int
        :param gpu: If GPU shall be used
        :type gpu: bool
        :param task_type: Type of task the model should be used for. Currently supporting:
                          "embeddings", "question_answering", "text_classification", "ner". More coming soon...
        :type task_type: str
        :param strict: whether to strictly enforce that the keys loaded from saved model match the ones in
                       the PredictionHead (see torch.nn.module.load_state_dict()).
                       Set to `False` for backwards compatibility with PHs saved with older version of FARM.
        :type strict: bool
        :param max_seq_len: maximum length of one text sample
        :type max_seq_len: int
        :param doc_stride: Only QA: When input text is longer than max_seq_len it gets split into parts, strided by doc_stride
        :type doc_stride: int
        :param extraction_strategy: Strategy to extract vectors. Choices: 'cls_token' (sentence vector), 'reduce_mean'
                                    (sentence vector), reduce_max (sentence vector), 'per_token' (individual token vectors)
        :type extraction_strategy: str
        :param extraction_layer: number of layer from which the embeddings shall be extracted. Default: -1 (very last layer).
        :type extraction_layer: int
        :return: An instance of the Inferencer.
        """
        device, n_gpu = initialize_device_settings(
            use_cuda=gpu, local_rank=-1, use_amp=None
        )
        # The inferencer's display name is the last path component.
        name = os.path.basename(model_name_or_path)
        # a) either from local dir
        if os.path.exists(model_name_or_path):
            model = BaseAdaptiveModel.load(
                load_dir=model_name_or_path, device=device, strict=strict
            )
            if task_type == "embeddings":
                processor = InferenceProcessor.load_from_dir(model_name_or_path)
            else:
                processor = Processor.load_from_dir(model_name_or_path)
        # b) or from remote transformers model hub
        else:
            logger.info(
                f"Could not find `{model_name_or_path}` locally. Try to download from model hub ..."
            )
            # task_type is mandatory for hub models: the right processor cannot
            # be inferred without it (yet — see TODO below).
            if not task_type:
                raise ValueError(
                    "Please specify the 'task_type' of the model you want to load from transformers. "
                    "Valid options for arg `task_type`:"
                    "'question_answering', 'embeddings', 'text_classification', 'ner'"
                )
            model = AdaptiveModel.convert_from_transformers(
                model_name_or_path, device, task_type
            )
            config = AutoConfig.from_pretrained(model_name_or_path)
            tokenizer = Tokenizer.load(model_name_or_path)
            # TODO infer task_type automatically from config (if possible)
            if task_type == "question_answering":
                processor = SquadProcessor(
                    tokenizer=tokenizer,
                    max_seq_len=max_seq_len,
                    label_list=["start_token", "end_token"],
                    metric="squad",
                    data_dir="data",
                    doc_stride=doc_stride,
                )
            elif task_type == "embeddings":
                processor = InferenceProcessor(tokenizer=tokenizer, max_seq_len=max_seq_len)
            elif task_type == "text_classification":
                # NOTE(review): assumes config.id2label keys are exactly
                # 0..n-1 — TODO confirm for non-standard configs.
                label_list = list(config.id2label[id] for id in range(len(config.id2label)))
                processor = TextClassificationProcessor(
                    tokenizer=tokenizer,
                    max_seq_len=max_seq_len,
                    data_dir="data",
                    label_list=label_list,
                    label_column_name="label",
                    metric="acc",
                    quote_char='"',
                )
            elif task_type == "ner":
                label_list = list(config.label2id.keys())
                processor = NERProcessor(
                    tokenizer=tokenizer,
                    max_seq_len=max_seq_len,
                    data_dir="data",
                    metric="seq_f1",
                    label_list=label_list,
                )
            else:
                raise ValueError(
                    f"`task_type` {task_type} is not supported yet. "
                    f"Valid options for arg `task_type`: 'question_answering', "
                    f"'embeddings', 'text_classification', 'ner'"
                )
        return cls(
            model,
            processor,
            task_type=task_type,
            batch_size=batch_size,
            gpu=gpu,
            name=name,
            return_class_probs=return_class_probs,
            extraction_strategy=extraction_strategy,
            extraction_layer=extraction_layer,
        )
|
def load(
    cls,
    model_name_or_path,
    batch_size=4,
    gpu=False,
    task_type=None,
    return_class_probs=False,
    strict=True,
    max_seq_len=256,
    doc_stride=128,
    extraction_layer=None,
    extraction_strategy=None,
):
    """
    Load an Inferencer incl. all relevant components (model, tokenizer, processor ...) either by
    1. specifying a public name from transformers' model hub (https://huggingface.co/models)
    2. or pointing to a local directory it is saved in.
    :param model_name_or_path: Local directory or public name of the model to load.
    :type model_name_or_path: str
    :param batch_size: Number of samples computed once per batch
    :type batch_size: int
    :param gpu: If GPU shall be used
    :type gpu: bool
    :param task_type: Type of task the model should be used for. Currently supporting:
                      "embeddings", "question_answering", "text_classification", "ner". More coming soon...
    :type task_type: str
    :param strict: whether to strictly enforce that the keys loaded from saved model match the ones in
                   the PredictionHead (see torch.nn.module.load_state_dict()).
                   Set to `False` for backwards compatibility with PHs saved with older version of FARM.
    :type strict: bool
    :param max_seq_len: maximum length of one text sample
    :type max_seq_len: int
    :param doc_stride: Only QA: When input text is longer than max_seq_len it gets split into parts, strided by doc_stride
    :type doc_stride: int
    :param extraction_strategy: Strategy to extract vectors. Choices: 'cls_token' (sentence vector), 'reduce_mean'
                                (sentence vector), reduce_max (sentence vector), 'per_token' (individual token vectors)
    :type extraction_strategy: str
    :param extraction_layer: number of layer from which the embeddings shall be extracted. Default: -1 (very last layer).
    :type extraction_layer: int
    :return: An instance of the Inferencer.
    """
    device, n_gpu = initialize_device_settings(
        use_cuda=gpu, local_rank=-1, use_amp=None
    )
    name = os.path.basename(model_name_or_path)
    # a) either from local dir
    if os.path.exists(model_name_or_path):
        model = BaseAdaptiveModel.load(
            load_dir=model_name_or_path, device=device, strict=strict
        )
        if task_type == "embeddings":
            processor = InferenceProcessor.load_from_dir(model_name_or_path)
        else:
            processor = Processor.load_from_dir(model_name_or_path)
    # b) or from remote transformers model hub
    else:
        logger.info(
            f"Could not find `{model_name_or_path}` locally. Try to download from model hub ..."
        )
        if not task_type:
            raise ValueError(
                "Please specify the 'task_type' of the model you want to load from transformers. "
                "Valid options for arg `task_type`:"
                "'question_answering', 'embeddings', 'text_classification', 'ner'"
            )
        model = AdaptiveModel.convert_from_transformers(
            model_name_or_path, device, task_type
        )
        config = AutoConfig.from_pretrained(model_name_or_path)
        tokenizer = Tokenizer.load(model_name_or_path)
        # TODO infer task_type automatically from config (if possible)
        if task_type == "question_answering":
            processor = SquadProcessor(
                tokenizer=tokenizer,
                max_seq_len=max_seq_len,
                label_list=["start_token", "end_token"],
                metric="squad",
                # Placeholder dir: no files are read at inference time, but the
                # Processor base class requires a non-None data_dir
                # (data_dir=None breaks loading, see deepset-ai/FARM#299).
                data_dir="data",
                doc_stride=doc_stride,
            )
        elif task_type == "embeddings":
            processor = InferenceProcessor(tokenizer=tokenizer, max_seq_len=max_seq_len)
        elif task_type == "text_classification":
            label_list = list(config.id2label[id] for id in range(len(config.id2label)))
            processor = TextClassificationProcessor(
                tokenizer=tokenizer,
                max_seq_len=max_seq_len,
                data_dir="data",  # placeholder, not read at inference
                label_list=label_list,
                label_column_name="label",
                metric="acc",
                quote_char='"',
            )
        elif task_type == "ner":
            label_list = list(config.label2id.keys())
            processor = NERProcessor(
                tokenizer=tokenizer,
                max_seq_len=max_seq_len,
                data_dir="data",  # placeholder, not read at inference
                metric="seq_f1",
                label_list=label_list,
            )
        else:
            raise ValueError(
                f"`task_type` {task_type} is not supported yet. "
                f"Valid options for arg `task_type`: 'question_answering', "
                f"'embeddings', 'text_classification', 'ner'"
            )
    return cls(
        model,
        processor,
        task_type=task_type,
        batch_size=batch_size,
        gpu=gpu,
        name=name,
        return_class_probs=return_class_probs,
        extraction_strategy=extraction_strategy,
        extraction_layer=extraction_layer,
    )
|
https://github.com/deepset-ai/FARM/issues/299
|
03/28/2020 22:25:07 - INFO - farm.utils - device: cpu n_gpu: 0, distributed training: False, automatic mixed precision training: None
03/28/2020 22:25:07 - INFO - farm.modeling.adaptive_model - Found files for loading 1 prediction heads
03/28/2020 22:25:07 - WARNING - farm.modeling.prediction_head - Some unused parameters are passed to the QuestionAnsweringHead. Might not be a problem. Params: {"training": true, "num_labels": 2, "ph_output_type": "per_token_squad", "model_type": "span_classification", "name": "QuestionAnsweringHead"}
03/28/2020 22:25:07 - INFO - farm.modeling.prediction_head - Prediction head initialized with size [768, 2]
03/28/2020 22:25:07 - INFO - farm.modeling.prediction_head - Loading prediction head from ../../saved_models/twmkn9/albert-base-v2-squad2/prediction_head_0.bin
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
~/Documents/CodingProjects/NLPofTimFerrissShow/QnA_with_Tim_Haystack.py in
51
52 # Load model
----> 53 reader = FARMReader(model_name_or_path="../../saved_models/twmkn9/albert-base-v2-squad2", use_gpu=False)
54 # A retriever identifies the k most promising chunks of text that might contain the answer for our question
55 # Retrievers use some simple but fast algorithm, here: TF-IDF
/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/haystack/reader/farm.py in __init__(self, model_name_or_path, context_window_size, batch_size, use_gpu, no_ans_boost, top_k_per_candidate, top_k_per_sample, max_processes, max_seq_len, doc_stride)
79 self.inferencer = Inferencer.load(model_name_or_path, batch_size=batch_size, gpu=use_gpu,
80 task_type="question_answering", max_seq_len=max_seq_len,
---> 81 doc_stride=doc_stride)
82 self.inferencer.model.prediction_heads[0].context_window_size = context_window_size
83 self.inferencer.model.prediction_heads[0].no_ans_boost = no_ans_boost
/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/farm/infer.py in load(cls, model_name_or_path, batch_size, gpu, task_type, return_class_probs, strict, max_seq_len, doc_stride)
139 processor = InferenceProcessor.load_from_dir(model_name_or_path)
140 else:
--> 141 processor = Processor.load_from_dir(model_name_or_path)
142
143 # b) or from remote transformers model hub
/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/farm/data_handler/processor.py in load_from_dir(cls, load_dir)
189 del config["tokenizer"]
190
--> 191 processor = cls.load(tokenizer=tokenizer, processor_name=config["processor"], **config)
192
193 for task_name, task in config["tasks"].items():
TypeError: load() missing 1 required positional argument: 'data_dir'
|
TypeError
|
def __init__(
    self,
    tokenizer,
    max_seq_len,
    data_dir,
    train_filename="train.txt",
    dev_filename="dev.txt",
    test_filename="test.txt",
    dev_split=0.0,
    next_sent_pred=True,
    max_docs=None,
    proxies=None,
    **kwargs,
):
    """
    Set up a processor for BERT-style language-model pretraining data.
    Registers an "lm" (masked LM) task over the full tokenizer vocabulary plus
    any tokens added afterwards, and optionally a "nextsentence" task.
    :param tokenizer: Used to split a sentence (str) into tokens.
    :param max_seq_len: Samples are truncated after this many tokens.
    :type max_seq_len: int
    :param data_dir: The directory in which the train and dev files can be found.
    :type data_dir: str
    :param train_filename: The name of the file containing training data.
    :type train_filename: str
    :param dev_filename: The name of the file containing the dev data. If None and 0.0 < dev_split < 1.0 the dev set
                         will be a slice of the train set.
    :type dev_filename: str or None
    :param test_filename: The name of the file containing the test data.
    :type test_filename: str
    :param dev_split: The proportion of the train set that will sliced. Only works if dev_filename is set to None.
    :type dev_split: float
    :param next_sent_pred: Whether to use next_sentence_prediction objective or not.
    :type next_sent_pred: bool
    :param max_docs: maximum number of documents to include from input dataset.
    :type max_docs: int
    :param proxies: proxy configuration to allow downloads of remote datasets.
                    Format as in "requests" library: https://2.python-requests.org//en/latest/user/advanced/#proxies
    :type proxies: dict
    :param kwargs: placeholder for passing generic parameters
    :type kwargs: object
    """
    self.max_docs = max_docs
    self.delimiter = ""
    super(BertStyleLMProcessor, self).__init__(
        tokenizer=tokenizer,
        max_seq_len=max_seq_len,
        train_filename=train_filename,
        dev_filename=dev_filename,
        test_filename=test_filename,
        dev_split=dev_split,
        data_dir=data_dir,
        tasks={},
        proxies=proxies,
    )
    self.next_sent_pred = next_sent_pred
    # The LM head must be able to predict every token the tokenizer can emit,
    # including ones added after the original vocab was built.
    lm_label_list = list(self.tokenizer.vocab) + self.get_added_tokens()
    self.add_task("lm", "acc", lm_label_list)
    if self.next_sent_pred:
        self.add_task("nextsentence", "acc", ["False", "True"])
|
def __init__(
    self,
    tokenizer,
    max_seq_len,
    data_dir,
    train_filename="train.txt",
    dev_filename="dev.txt",
    test_filename="test.txt",
    dev_split=0.0,
    next_sent_pred=True,
    max_docs=None,
    proxies=None,
    **kwargs,
):
    """
    Processor for BERT-style language-model pretraining data.
    :param tokenizer: Used to split a sentence (str) into tokens.
    :param max_seq_len: Samples are truncated after this many tokens.
    :type max_seq_len: int
    :param data_dir: The directory in which the train and dev files can be found.
    :type data_dir: str
    :param train_filename: The name of the file containing training data.
    :type train_filename: str
    :param dev_filename: The name of the file containing the dev data. If None and 0.0 < dev_split < 1.0 the dev set
                         will be a slice of the train set.
    :type dev_filename: str or None
    :param test_filename: The name of the file containing the test data.
    :type test_filename: str
    :param dev_split: The proportion of the train set that will sliced. Only works if dev_filename is set to None.
    :type dev_split: float
    :param next_sent_pred: Whether to use next_sentence_prediction objective or not.
    :type next_sent_pred: bool
    :param max_docs: maximum number of documents to include from input dataset.
    :type max_docs: int
    :param proxies: proxy configuration to allow downloads of remote datasets.
                    Format as in "requests" library: https://2.python-requests.org//en/latest/user/advanced/#proxies
    :type proxies: dict
    :param kwargs: placeholder for passing generic parameters
    :type kwargs: object
    """
    self.delimiter = ""
    self.max_docs = max_docs
    super(BertStyleLMProcessor, self).__init__(
        tokenizer=tokenizer,
        max_seq_len=max_seq_len,
        train_filename=train_filename,
        dev_filename=dev_filename,
        test_filename=test_filename,
        dev_split=dev_split,
        data_dir=data_dir,
        tasks={},
        proxies=proxies,
    )
    self.next_sent_pred = next_sent_pred
    # Include tokens that were added to the tokenizer after its vocab was built.
    # Without them, label ids of added tokens can exceed the label list and
    # trigger an IndexError when labels are converted back during evaluation
    # (see deepset-ai/FARM#193).
    added_tokens = self.get_added_tokens()
    self.add_task("lm", "acc", list(self.tokenizer.vocab) + added_tokens)
    if self.next_sent_pred:
        self.add_task("nextsentence", "acc", ["False", "True"])
|
https://github.com/deepset-ai/FARM/issues/193
|
Train epoch 1/1 (Cur. train loss: 0.6664): 18%|█▊ | 30/170 [00:48<03:43, 1.60s/it]
Evaluating: 0%| | 0/319 [00:00<?, ?it/s]
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-3-f9d4de447982> in <module>()
1 # 7. Let it grow! Watch the tracked metrics live on the public mlflow server: https://public-mlflow.deepset.ai
----> 2 model = trainer.train(model)
~/bertclassifier/FARM/farm/train.py in train(self, model)
224 ):
225 evalnr += 1
--> 226 result = self.evaluator_dev.eval(model)
227 self.evaluator_dev.log_results(result, "Dev", self.global_step)
228 if self.early_stopping:
~/bertclassifier/FARM/farm/eval.py in eval(self, model, return_preds_and_labels)
71 losses_per_head = model.logits_to_loss_per_head(logits=logits, **batch)
72 preds = model.logits_to_preds(logits=logits, **batch)
---> 73 labels = model.prepare_labels(**batch)
74
75 # stack results of all batches per prediction head
~/bertclassifier/FARM/farm/modeling/adaptive_model.py in prepare_labels(self, **kwargs)
170 # all_labels.append(labels)
171 for head in self.prediction_heads:
--> 172 labels = head.prepare_labels(**kwargs)
173 all_labels.append(labels)
174 return all_labels
~/bertclassifier/FARM/farm/modeling/prediction_head.py in prepare_labels(self, **kwargs)
662 # we have a batch of sequences here. we need to convert for each token in each sequence.
663 for ids_for_sequence in label_ids:
--> 664 labels.append([self.label_list[int(x)] for x in ids_for_sequence if int(x) != -1])
665 return labels
666
~/bertclassifier/FARM/farm/modeling/prediction_head.py in <listcomp>(.0)
662 # we have a batch of sequences here. we need to convert for each token in each sequence.
663 for ids_for_sequence in label_ids:
--> 664 labels.append([self.label_list[int(x)] for x in ids_for_sequence if int(x) != -1])
665 return labels
666
IndexError: list index out of range
|
IndexError
|
def eval(self, model):
    """
    Performs evaluation on a given model.
    :param model: The model on which to perform evaluation
    :type model: AdaptiveModel
    :return all_results: A list of dictionaries, one for each prediction head. Each dictionary contains the metrics
             and reports generated during evaluation.
    :rtype all_results: list of dicts
    """
    model.eval()
    # init empty lists per prediction head
    loss_all = [0 for _ in model.prediction_heads]
    preds_all = [[] for _ in model.prediction_heads]
    label_all = [[] for _ in model.prediction_heads]
    for step, batch in enumerate(
        tqdm(self.data_loader, desc="Evaluating", mininterval=10)
    ):
        # move every tensor of the batch to the evaluation device
        batch = {key: batch[key].to(self.device) for key in batch}
        with torch.no_grad():
            logits = model.forward(**batch)
            # TODO logits_to_loss should be a single, overloaded function
            losses_per_head = model.logits_to_loss_per_head(logits=logits, **batch)
            preds = model.logits_to_preds(logits=logits, **batch)
            labels = model.prepare_labels(**batch)
        # stack results of all batches per prediction head
        for head_num, head in enumerate(model.prediction_heads):
            loss_all[head_num] += np.sum(to_numpy(losses_per_head[head_num]))
            if head.model_type == "span_classification":
                # TODO check why adaptive model doesnt pack preds into list of list of tuples
                preds_all[head_num] += preds
                label_all[head_num] += labels
            else:
                preds_all[head_num] += list(to_numpy(preds[head_num]))
                label_all[head_num] += list(to_numpy(labels[head_num]))
    # Evaluate per prediction head
    all_results = []
    for head_num, head in enumerate(model.prediction_heads):
        if head.model_type == "multilabel_text_classification":
            # converting from string preds back to multi-hot encoding
            from sklearn.preprocessing import MultiLabelBinarizer
            mlb = MultiLabelBinarizer(classes=head.label_list)
            # TODO check why .fit() should be called on predictions, rather than on labels
            preds_all[head_num] = mlb.fit_transform(preds_all[head_num])
            label_all[head_num] = mlb.transform(label_all[head_num])
        elif head.model_type == "span_classification":
            # merge per-passage predictions into per-document predictions
            temp = head._aggregate_preds(preds_all[head_num])
            preds_all[head_num] = temp
        # mean loss over the full dataset, plus whatever metrics the head defines
        result = {
            "loss": loss_all[head_num] / len(self.data_loader.dataset),
            "task_name": head.task_name,
        }
        result.update(
            compute_metrics(
                metric=head.metric,
                preds=preds_all[head_num],
                labels=label_all[head_num],
            )
        )
        # Select type of report depending on prediction head output type
        if self.classification_report:
            if head.ph_output_type == "per_token":
                report_fn = token_classification_report
            elif head.ph_output_type == "per_sequence":
                report_fn = classification_report
            elif head.ph_output_type == "per_token_squad":
                report_fn = lambda *args, **kwargs: "not Implemented"
            elif head.ph_output_type == "per_sequence_continuous":
                report_fn = r2_score
            else:
                raise NotImplementedError
            # CHANGE PARAMETERS, not all report_fn accept digits
            if head.ph_output_type in ["per_sequence_continuous", "per_token"]:
                result["report"] = report_fn(label_all[head_num], preds_all[head_num])
            else:
                # supply labels as all possible combination because if ground truth labels do not cover
                # all values in label_list (maybe dev set is small), the report will break
                if head.model_type == "multilabel_text_classification":
                    # For multilabel classification, we don't eval with string labels here, but with multihot vectors.
                    # Therefore we need to supply all possible label ids instead of label values.
                    all_possible_labels = list(range(len(head.label_list)))
                else:
                    all_possible_labels = head.label_list
                result["report"] = report_fn(
                    label_all[head_num],
                    preds_all[head_num],
                    digits=4,
                    labels=all_possible_labels,
                    target_names=head.label_list,
                )
        all_results.append(result)
    return all_results
|
def eval(self, model):
    """
    Performs evaluation on a given model.
    :param model: The model on which to perform evaluation
    :type model: AdaptiveModel
    :return all_results: A list of dictionaries, one for each prediction head. Each dictionary contains the metrics
             and reports generated during evaluation.
    :rtype all_results: list of dicts
    """
    model.eval()
    # init empty lists per prediction head
    loss_all = [0 for _ in model.prediction_heads]
    preds_all = [[] for _ in model.prediction_heads]
    label_all = [[] for _ in model.prediction_heads]
    for step, batch in enumerate(
        tqdm(self.data_loader, desc="Evaluating", mininterval=10)
    ):
        batch = {key: batch[key].to(self.device) for key in batch}
        with torch.no_grad():
            logits = model.forward(**batch)
            # TODO logits_to_loss should be a single, overloaded function
            losses_per_head = model.logits_to_loss_per_head(logits=logits, **batch)
            preds = model.logits_to_preds(logits=logits, **batch)
            labels = model.prepare_labels(**batch)
        # stack results of all batches per prediction head
        for head_num, head in enumerate(model.prediction_heads):
            loss_all[head_num] += np.sum(to_numpy(losses_per_head[head_num]))
            if head.model_type == "span_classification":
                # TODO check why adaptive model doesnt pack preds into list of list of tuples
                preds_all[head_num] += preds
                label_all[head_num] += labels
            else:
                preds_all[head_num] += list(to_numpy(preds[head_num]))
                label_all[head_num] += list(to_numpy(labels[head_num]))
    # Evaluate per prediction head
    all_results = []
    for head_num, head in enumerate(model.prediction_heads):
        if head.model_type == "multilabel_text_classification":
            # converting from string preds back to multi-hot encoding
            from sklearn.preprocessing import MultiLabelBinarizer
            mlb = MultiLabelBinarizer(classes=head.label_list)
            # TODO check why .fit() should be called on predictions, rather than on labels
            preds_all[head_num] = mlb.fit_transform(preds_all[head_num])
            label_all[head_num] = mlb.transform(label_all[head_num])
        elif head.model_type == "span_classification":
            temp = head._aggregate_preds(preds_all[head_num])
            preds_all[head_num] = temp
        result = {
            "loss": loss_all[head_num] / len(self.data_loader.dataset),
            "task_name": head.task_name,
        }
        result.update(
            compute_metrics(
                metric=head.metric,
                preds=preds_all[head_num],
                labels=label_all[head_num],
            )
        )
        # Select type of report depending on prediction head output type
        if self.classification_report:
            if head.ph_output_type == "per_token":
                report_fn = token_classification_report
            elif head.ph_output_type == "per_sequence":
                report_fn = classification_report
            elif head.ph_output_type == "per_token_squad":
                report_fn = lambda *args, **kwargs: "not Implemented"
            elif head.ph_output_type == "per_sequence_continuous":
                report_fn = r2_score
            else:
                raise NotImplementedError
            # CHANGE PARAMETERS, not all report_fn accept digits
            if head.ph_output_type in ["per_sequence_continuous", "per_token"]:
                result["report"] = report_fn(label_all[head_num], preds_all[head_num])
            else:
                # supply labels as all possible combination because if ground truth labels do not cover
                # all values in label_list (maybe dev set is small), the report will break
                if head.model_type == "multilabel_text_classification":
                    # For multilabel classification, preds/labels are multi-hot vectors
                    # (see MultiLabelBinarizer above), not strings. Passing string labels
                    # here made sklearn raise "cannot perform reduce with flexible type"
                    # (deepset-ai/FARM#148) — supply all possible label ids instead.
                    all_possible_labels = list(range(len(head.label_list)))
                else:
                    all_possible_labels = head.label_list
                result["report"] = report_fn(
                    label_all[head_num],
                    preds_all[head_num],
                    digits=4,
                    labels=all_possible_labels,
                    target_names=head.label_list,
                )
        all_results.append(result)
    return all_results
|
https://github.com/deepset-ai/FARM/issues/148
|
${PYTHONENVHOME}/lib/python3.6/site-packages/numpy/lib/arraysetops.py:564: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison
mask &= (ar1 != a)
Traceback (most recent call last):
File "doc_classification_multilabel.py", line 97, in <module>
model = trainer.train(model)
File "${PYTHONENVHOME}/lib/python3.6/site-packages/farm-0.3.1-py3.6.egg/farm/train.py", line 163, in train
File "${PYTHONENVHOME}/lib/python3.6/site-packages/farm-0.3.1-py3.6.egg/farm/eval.py", line 138, in eval
File "${PYTHONENVHOME}/lib/python3.6/site-packages/sklearn/metrics/classification.py", line 1886, in classification_report
sample_weight=sample_weight)
File "${PYTHONENVHOME}/lib/python3.6/site-packages/sklearn/metrics/classification.py", line 1421, in precision_recall_fscore_support
labels=labels, samplewise=samplewise)
File "${PYTHONENVHOME}/lib/python3.6/site-packages/sklearn/metrics/classification.py", line 457, in multilabel_confusion_matrix
if np.max(labels) > np.max(present_labels):
File "<__array_function__ internals>", line 6, in amax
File "${PYTHONENVHOME}/lib/python3.6/site-packages/numpy/core/fromnumeric.py", line 2621, in amax
keepdims=keepdims, initial=initial, where=where)
File "${PYTHONENVHOME}/lib/python3.6/site-packages/numpy/core/fromnumeric.py", line 90, in _wrapreduction
return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
TypeError: cannot perform reduce with flexible type
|
TypeError
|
def __init__(
    self,
    tokenizer,
    max_seq_len,
    data_dir,
    label_list=None,
    metric=None,
    train_filename="train.tsv",
    dev_filename=None,
    test_filename="test.tsv",
    dev_split=0.1,
    delimiter="\t",
    quote_char="'",
    skiprows=None,
    label_column_name="label",
    multilabel=False,
    header=0,
    **kwargs,
):
    """
    Set up a processor for CSV/TSV text-classification data.
    A default "text_classification" task is registered only when both
    `metric` and `label_list` are supplied; otherwise a hint is logged and
    task registration is left to the caller via `add_task`.
    """
    # TODO If an arg is misspelt, e.g. metrics, it will be swallowed silently by kwargs
    # File-parsing options specific to this processor
    self.header = header
    self.skiprows = skiprows
    self.quote_char = quote_char
    self.delimiter = delimiter
    super(TextClassificationProcessor, self).__init__(
        tokenizer=tokenizer,
        max_seq_len=max_seq_len,
        train_filename=train_filename,
        dev_filename=dev_filename,
        test_filename=test_filename,
        dev_split=dev_split,
        data_dir=data_dir,
        tasks={},
    )
    if not (metric and label_list):
        logger.info(
            "Initialized processor without tasks. Supply `metric` and `label_list` to the constructor for "
            "using the default task or add a custom task later via processor.add_task()"
        )
    else:
        self.add_task(
            name="text_classification",
            metric=metric,
            label_list=label_list,
            label_column_name=label_column_name,
            task_type="multilabel_classification" if multilabel else "classification",
        )
|
def __init__(
    self,
    tokenizer,
    max_seq_len,
    data_dir,
    label_list=None,
    metric=None,
    train_filename="train.tsv",
    dev_filename=None,
    test_filename="test.tsv",
    dev_split=0.1,
    delimiter="\t",
    quote_char="'",
    skiprows=None,
    label_column_name="label",
    multilabel=False,
    header=0,
    **kwargs,
):
    """
    Processor for CSV/TSV text-classification data.
    :param tokenizer: Used to split a sentence (str) into tokens.
    :param max_seq_len: Samples are truncated after this many tokens.
    :type max_seq_len: int
    :param data_dir: The directory in which the train, dev and test files can be found.
    :type data_dir: str
    :param label_list: list of labels to predict (strings).
    :type label_list: list
    :param metric: name of metric that shall be used for evaluation, e.g. "acc".
    :param delimiter: column separator of the input files.
    :param quote_char: character used for quoting fields in the input files.
    :param skiprows: rows to skip when reading the input files.
    :param label_column_name: name of the column holding the labels.
    :param multilabel: whether each sample can carry multiple labels.
    :type multilabel: bool
    :param header: row number to use as the column names.
    :param kwargs: placeholder for passing generic parameters
    """
    # TODO If an arg is misspelt, e.g. metrics, it will be swallowed silently by kwargs
    # Custom processor attributes
    self.delimiter = delimiter
    self.quote_char = quote_char
    self.skiprows = skiprows
    self.header = header
    super(TextClassificationProcessor, self).__init__(
        tokenizer=tokenizer,
        max_seq_len=max_seq_len,
        train_filename=train_filename,
        dev_filename=dev_filename,
        test_filename=test_filename,
        dev_split=dev_split,
        data_dir=data_dir,
        tasks={},
    )
    # TODO raise info when no task is added due to missing "metric" or "labels" arg
    if metric and label_list:
        if multilabel:
            task_type = "multilabel_classification"
        else:
            task_type = "classification"
        self.add_task(
            name="text_classification",
            metric=metric,
            label_list=label_list,
            label_column_name=label_column_name,
            task_type=task_type,
        )
    else:
        # Previously nothing was reported here, leaving users puzzled why no task
        # existed (e.g. a later KeyError when connecting prediction heads).
        # Log explicitly, consistent with the other processors.
        logger.info(
            "Initialized processor without tasks. Supply `metric` and `label_list` to the constructor for "
            "using the default task or add a custom task later via processor.add_task()"
        )
|
https://github.com/deepset-ai/FARM/issues/120
|
10/17/2019 20:16:51 - INFO - pytorch_transformers.modeling_utils - load
ing weights file https://s3.amazonaws.com/models.huggingface.co/bert/bert
-base-cased-pytorch_model.bin from cache at /root/.cache/torch/pytorch_tr
ansformers/35d8b9d36faaf46728a0192d82bf7d00137490cd6074e8500778afed552a67
e5.3fadbea36527ae472139fe84cddaa65454d7429f12d543d80bfc3ad70de55ac2
10/17/2019 20:16:54 - WARNING - farm.modeling.language_model - Could no
t automatically detect from language model name what language it is.
We guess it's an *ENGLISH* model ...
If not: Init the language model by supplying the 'language' param.
Example: Bert.load('my_mysterious_model_name', language='de')
10/17/2019 20:16:58 - INFO - farm.modeling.optimization - Number of opt
imization steps: 12220
Traceback (most recent call last):
File "run_all_experiments.py", line 36, in <module>
main()
File "run_all_experiments.py", line 33, in main
run_experiment(experiment)
File "/home/user/farm/experiment.py", line 147, in run_experiment
model = trainer.train(model)
File "/home/user/farm/train.py", line 129, in train
model.connect_heads_with_processor(self.data_silo.processor.tasks)
File "/home/user/farm/modeling/adaptive_model.py", line 233, in connect
_heads_with_processor
head.label_tensor_name = tasks[head.task_name]["label_tensor_name"]
KeyError: 'question_answering'
|
KeyError
|
def __init__(
    self,
    tokenizer,
    max_seq_len,
    data_dir,
    label_list=None,
    metric=None,
    train_filename="train.txt",
    dev_filename="dev.txt",
    test_filename="test.txt",
    dev_split=0.0,
    delimiter="\t",
    **kwargs,
):
    """
    Set up a processor for token-level (NER) datasets.
    A default "ner" task is registered only when both `metric` and
    `label_list` are supplied; otherwise a hint is logged and task
    registration is left to the caller via `add_task`.
    """
    self.delimiter = delimiter  # column separator of the input files
    super(NERProcessor, self).__init__(
        tokenizer=tokenizer,
        max_seq_len=max_seq_len,
        train_filename=train_filename,
        dev_filename=dev_filename,
        test_filename=test_filename,
        dev_split=dev_split,
        data_dir=data_dir,
        tasks={},
    )
    if not (metric and label_list):
        logger.info(
            "Initialized processor without tasks. Supply `metric` and `label_list` to the constructor for "
            "using the default task or add a custom task later via processor.add_task()"
        )
    else:
        self.add_task("ner", metric, label_list)
|
def __init__(
    self,
    tokenizer,
    max_seq_len,
    data_dir,
    label_list=None,
    metric=None,
    train_filename="train.txt",
    dev_filename="dev.txt",
    test_filename="test.txt",
    dev_split=0.0,
    delimiter="\t",
    **kwargs,
):
    """
    Processor for token-level (NER) datasets.
    :param tokenizer: Used to split a sentence (str) into tokens.
    :param max_seq_len: Samples are truncated after this many tokens.
    :type max_seq_len: int
    :param data_dir: The directory in which the train, dev and test files can be found.
    :type data_dir: str
    :param label_list: list of labels to predict (strings).
    :type label_list: list
    :param metric: name of metric that shall be used for evaluation, e.g. "seq_f1".
    :param delimiter: column separator of the input files.
    :param kwargs: placeholder for passing generic parameters
    """
    # Custom processor attributes
    self.delimiter = delimiter
    super(NERProcessor, self).__init__(
        tokenizer=tokenizer,
        max_seq_len=max_seq_len,
        train_filename=train_filename,
        dev_filename=dev_filename,
        test_filename=test_filename,
        dev_split=dev_split,
        data_dir=data_dir,
        tasks={},
    )
    if metric and label_list:
        self.add_task("ner", metric, label_list)
    else:
        # Log explicitly when no default task is registered so a later
        # missing-task KeyError is easier to diagnose; consistent with the
        # other processors.
        logger.info(
            "Initialized processor without tasks. Supply `metric` and `label_list` to the constructor for "
            "using the default task or add a custom task later via processor.add_task()"
        )
|
https://github.com/deepset-ai/FARM/issues/120
|
10/17/2019 20:16:51 - INFO - pytorch_transformers.modeling_utils - load
ing weights file https://s3.amazonaws.com/models.huggingface.co/bert/bert
-base-cased-pytorch_model.bin from cache at /root/.cache/torch/pytorch_tr
ansformers/35d8b9d36faaf46728a0192d82bf7d00137490cd6074e8500778afed552a67
e5.3fadbea36527ae472139fe84cddaa65454d7429f12d543d80bfc3ad70de55ac2
10/17/2019 20:16:54 - WARNING - farm.modeling.language_model - Could no
t automatically detect from language model name what language it is.
We guess it's an *ENGLISH* model ...
If not: Init the language model by supplying the 'language' param.
Example: Bert.load('my_mysterious_model_name', language='de')
10/17/2019 20:16:58 - INFO - farm.modeling.optimization - Number of opt
imization steps: 12220
Traceback (most recent call last):
File "run_all_experiments.py", line 36, in <module>
main()
File "run_all_experiments.py", line 33, in main
run_experiment(experiment)
File "/home/user/farm/experiment.py", line 147, in run_experiment
model = trainer.train(model)
File "/home/user/farm/train.py", line 129, in train
model.connect_heads_with_processor(self.data_silo.processor.tasks)
File "/home/user/farm/modeling/adaptive_model.py", line 233, in connect
_heads_with_processor
head.label_tensor_name = tasks[head.task_name]["label_tensor_name"]
KeyError: 'question_answering'
|
KeyError
|
def __init__(
    self,
    tokenizer,
    max_seq_len,
    data_dir,
    labels=None,
    metric=None,
    train_filename="train-v2.0.json",
    dev_filename="dev-v2.0.json",
    test_filename=None,
    dev_split=0,
    doc_stride=128,
    max_query_length=64,
    **kwargs,
):
    """
    :param tokenizer: Used to split a sentence (str) into tokens.
    :param max_seq_len: Samples are truncated after this many tokens.
    :type max_seq_len: int
    :param data_dir: The directory in which the train and dev files can be found. Squad has a private test file
    :type data_dir: str
    :param train_filename: The name of the file containing training data.
    :type train_filename: str
    :param dev_filename: The name of the file containing the dev data. If None and 0.0 < dev_split < 1.0 the dev set
        will be a slice of the train set.
    :type dev_filename: str or None
    :param test_filename: None
    :type test_filename: str
    :param dev_split: The proportion of the train set that will sliced. Only works if dev_filename is set to None
    :type dev_split: float
    :param data_dir: The directory in which the train, test and perhaps dev files can be found.
    :type data_dir: str
    :param doc_stride: When the document containing the answer is too long it gets split into part, strided by doc_stride
    :type doc_stride: int
    :param max_query_length: Maximum length of the question (in number of subword tokens)
    :type max_query_length: int
    :param kwargs: placeholder for passing generic parameters
    :type kwargs: object
    """
    # QA is modeled as token-level span classification; these fields steer how
    # downstream components interpret the prediction head's output.
    self.target = "classification"
    self.ph_output_type = "per_token_squad"
    self.doc_stride = doc_stride
    self.max_query_length = max_query_length
    super(SquadProcessor, self).__init__(
        tokenizer=tokenizer,
        max_seq_len=max_seq_len,
        train_filename=train_filename,
        dev_filename=dev_filename,
        test_filename=test_filename,
        dev_split=dev_split,
        data_dir=data_dir,
        tasks={},
    )
    # Only register the default QA task when both metric and labels are given;
    # otherwise the caller is expected to call add_task() later.
    if metric and labels:
        self.add_task("question_answering", metric, labels)
    else:
        logger.info(
            "Initialized processor without tasks. Supply `metric` and `label_list` to the constructor for "
            "using the default task or add a custom task later via processor.add_task()"
        )
|
def __init__(
    self,
    tokenizer,
    max_seq_len,
    data_dir,
    labels=None,
    metric=None,
    train_filename="train-v2.0.json",
    dev_filename="dev-v2.0.json",
    test_filename=None,
    dev_split=0,
    doc_stride=128,
    max_query_length=64,
    **kwargs,
):
    """
    :param tokenizer: Used to split a sentence (str) into tokens.
    :param max_seq_len: Samples are truncated after this many tokens.
    :type max_seq_len: int
    :param data_dir: The directory in which the train and dev files can be found. Squad has a private test file
    :type data_dir: str
    :param labels: list of labels to predict (strings). For most cases this should be: ["start_token", "end_token"]
    :type labels: list
    :param metric: name of metric that shall be used for evaluation, e.g. "squad".
    :type metric: str
    :param train_filename: The name of the file containing training data.
    :type train_filename: str
    :param dev_filename: The name of the file containing the dev data. If None and 0.0 < dev_split < 1.0 the dev set
                         will be a slice of the train set.
    :type dev_filename: str or None
    :param test_filename: None
    :type test_filename: str
    :param dev_split: The proportion of the train set that will sliced. Only works if dev_filename is set to None
    :type dev_split: float
    :param doc_stride: When the document containing the answer is too long it gets split into part, strided by doc_stride
    :type doc_stride: int
    :param max_query_length: Maximum length of the question (in number of subword tokens)
    :type max_query_length: int
    :param kwargs: placeholder for passing generic parameters
    :type kwargs: object
    """
    self.target = "classification"
    self.ph_output_type = "per_token_squad"
    self.doc_stride = doc_stride
    self.max_query_length = max_query_length
    super(SquadProcessor, self).__init__(
        tokenizer=tokenizer,
        max_seq_len=max_seq_len,
        train_filename=train_filename,
        dev_filename=dev_filename,
        test_filename=test_filename,
        dev_split=dev_split,
        data_dir=data_dir,
        tasks={},
    )
    if metric and labels:
        self.add_task("question_answering", metric, labels)
    else:
        # Without a registered task, connect_heads_with_processor() later fails with
        # KeyError: 'question_answering'. Tell the user up front how to avoid that
        # instead of failing silently here.
        logger.info(
            "Initialized processor without tasks. Supply `metric` and `labels` to the constructor for "
            "using the default task or add a custom task later via processor.add_task()"
        )
|
https://github.com/deepset-ai/FARM/issues/120
|
10/17/2019 20:16:51 - INFO - pytorch_transformers.modeling_utils - load
ing weights file https://s3.amazonaws.com/models.huggingface.co/bert/bert
-base-cased-pytorch_model.bin from cache at /root/.cache/torch/pytorch_tr
ansformers/35d8b9d36faaf46728a0192d82bf7d00137490cd6074e8500778afed552a67
e5.3fadbea36527ae472139fe84cddaa65454d7429f12d543d80bfc3ad70de55ac2
10/17/2019 20:16:54 - WARNING - farm.modeling.language_model - Could no
t automatically detect from language model name what language it is.
We guess it's an *ENGLISH* model ...
If not: Init the language model by supplying the 'language' param.
Example: Bert.load('my_mysterious_model_name', language='de')
10/17/2019 20:16:58 - INFO - farm.modeling.optimization - Number of opt
imization steps: 12220
Traceback (most recent call last):
File "run_all_experiments.py", line 36, in <module>
main()
File "run_all_experiments.py", line 33, in main
run_experiment(experiment)
File "/home/user/farm/experiment.py", line 147, in run_experiment
model = trainer.train(model)
File "/home/user/farm/train.py", line 129, in train
model.connect_heads_with_processor(self.data_silo.processor.tasks)
File "/home/user/farm/modeling/adaptive_model.py", line 233, in connect
_heads_with_processor
head.label_tensor_name = tasks[head.task_name]["label_tensor_name"]
KeyError: 'question_answering'
|
KeyError
|
def __init__(
    self,
    tokenizer,
    max_seq_len,
    data_dir,
    label_list=None,
    metric=None,
    train_filename="train-v2.0.json",
    dev_filename="dev-v2.0.json",
    test_filename=None,
    dev_split=0,
    doc_stride=128,
    max_query_length=64,
    **kwargs,
):
    """
    Build a SQuAD-style question-answering processor.

    :param tokenizer: Used to split a sentence (str) into tokens.
    :param max_seq_len: Samples are truncated after this many tokens.
    :type max_seq_len: int
    :param data_dir: Directory holding the train and dev files (SQuAD keeps its test file private).
    :type data_dir: str
    :param label_list: Labels to predict (strings); usually ["start_token", "end_token"].
    :type label_list: list
    :param metric: Name of the evaluation metric, e.g. "squad".
    :type metric: str
    :param train_filename: Name of the training-data file.
    :type train_filename: str
    :param dev_filename: Name of the dev-data file. If None and 0.0 < dev_split < 1.0,
                         the dev set is sliced off the train set.
    :type dev_filename: str or None
    :param test_filename: None
    :type test_filename: str
    :param dev_split: Fraction of the train set to slice off; only used when dev_filename is None.
    :type dev_split: float
    :param doc_stride: Stride used to split overlong answer documents into parts.
    :type doc_stride: int
    :param max_query_length: Maximum question length in subword tokens.
    :type max_query_length: int
    :param kwargs: placeholder for passing generic parameters
    :type kwargs: object
    """
    # Head/output configuration for span-extraction QA.
    self.target = "classification"
    self.ph_output_type = "per_token_squad"
    self.max_query_length = max_query_length
    self.doc_stride = doc_stride

    super(SquadProcessor, self).__init__(
        tokenizer=tokenizer,
        max_seq_len=max_seq_len,
        train_filename=train_filename,
        dev_filename=dev_filename,
        test_filename=test_filename,
        dev_split=dev_split,
        data_dir=data_dir,
        tasks={},
    )

    # Only register the default QA task when both pieces of task config are given.
    if not (metric and label_list):
        logger.info(
            "Initialized processor without tasks. Supply `metric` and `label_list` to the constructor for "
            "using the default task or add a custom task later via processor.add_task()"
        )
    else:
        self.add_task("question_answering", metric, label_list)
|
def __init__(
    self,
    tokenizer,
    max_seq_len,
    data_dir,
    labels=None,
    metric=None,
    train_filename="train-v2.0.json",
    dev_filename="dev-v2.0.json",
    test_filename=None,
    dev_split=0,
    doc_stride=128,
    max_query_length=64,
    **kwargs,
):
    """
    :param tokenizer: Used to split a sentence (str) into tokens.
    :param max_seq_len: Samples are truncated after this many tokens.
    :type max_seq_len: int
    :param data_dir: The directory in which the train and dev files can be found. Squad has a private test file
    :type data_dir: str
    :param labels: list of labels to predict (strings). For most cases this should be: ["start_token", "end_token"]
    :type labels: list
    :param metric: name of metric that shall be used for evaluation, e.g. "squad".
    :type metric: str
    :param train_filename: The name of the file containing training data.
    :type train_filename: str
    :param dev_filename: The name of the file containing the dev data. If None and 0.0 < dev_split < 1.0 the dev set
                         will be a slice of the train set.
    :type dev_filename: str or None
    :param test_filename: None
    :type test_filename: str
    :param dev_split: The proportion of the train set that will sliced. Only works if dev_filename is set to None
    :type dev_split: float
    :param doc_stride: When the document containing the answer is too long it gets split into part, strided by doc_stride
    :type doc_stride: int
    :param max_query_length: Maximum length of the question (in number of subword tokens)
    :type max_query_length: int
    :param kwargs: placeholder for passing generic parameters
    :type kwargs: object
    """
    # Head/output configuration for span-extraction question answering.
    self.target = "classification"
    self.ph_output_type = "per_token_squad"
    self.doc_stride = doc_stride
    self.max_query_length = max_query_length
    super(SquadProcessor, self).__init__(
        tokenizer=tokenizer,
        max_seq_len=max_seq_len,
        train_filename=train_filename,
        dev_filename=dev_filename,
        test_filename=test_filename,
        dev_split=dev_split,
        data_dir=data_dir,
        tasks={},
    )
    # Register the default QA task only when both metric and labels are supplied;
    # otherwise the caller must add a task before training.
    if metric and labels:
        self.add_task("question_answering", metric, labels)
    else:
        logger.info(
            "Initialized processor without tasks. Supply `metric` and `label_list` to the constructor for "
            "using the default task or add a custom task later via processor.add_task()"
        )
|
https://github.com/deepset-ai/FARM/issues/120
|
10/17/2019 20:16:51 - INFO - pytorch_transformers.modeling_utils - load
ing weights file https://s3.amazonaws.com/models.huggingface.co/bert/bert
-base-cased-pytorch_model.bin from cache at /root/.cache/torch/pytorch_tr
ansformers/35d8b9d36faaf46728a0192d82bf7d00137490cd6074e8500778afed552a67
e5.3fadbea36527ae472139fe84cddaa65454d7429f12d543d80bfc3ad70de55ac2
10/17/2019 20:16:54 - WARNING - farm.modeling.language_model - Could no
t automatically detect from language model name what language it is.
We guess it's an *ENGLISH* model ...
If not: Init the language model by supplying the 'language' param.
Example: Bert.load('my_mysterious_model_name', language='de')
10/17/2019 20:16:58 - INFO - farm.modeling.optimization - Number of opt
imization steps: 12220
Traceback (most recent call last):
File "run_all_experiments.py", line 36, in <module>
main()
File "run_all_experiments.py", line 33, in main
run_experiment(experiment)
File "/home/user/farm/experiment.py", line 147, in run_experiment
model = trainer.train(model)
File "/home/user/farm/train.py", line 129, in train
model.connect_heads_with_processor(self.data_silo.processor.tasks)
File "/home/user/farm/modeling/adaptive_model.py", line 233, in connect
_heads_with_processor
head.label_tensor_name = tasks[head.task_name]["label_tensor_name"]
KeyError: 'question_answering'
|
KeyError
|
def __init__(self, processor, batch_size, distributed=False):
    """
    Load and hold the train/dev/test datasets produced by *processor*.

    :param processor: A dataset specific Processor object which will turn input (file or dict) into a Pytorch Dataset.
    :type processor: Processor
    :param batch_size: The size of batch that should be returned by the DataLoaders.
    :type batch_size: int
    :param distributed: Set to True if the program is running in a distributed setting.
    :type distributed: bool
    """
    self.processor = processor
    self.batch_size = batch_size
    self.distributed = distributed
    self.data = {}
    self.class_weights = None
    # Upper bound on the number of worker processes used for preprocessing.
    self.max_processes = 128
    self._load_data()
|
def __init__(
    self, processor, batch_size, distributed=False, multiprocessing_chunk_size=100
):
    """
    :param processor: A dataset specific Processor object which will turn input (file or dict) into a Pytorch Dataset.
    :type processor: Processor
    :param batch_size: The size of batch that should be returned by the DataLoaders.
    :type batch_size: int
    :param distributed: Set to True if the program is running in a distributed setting.
    :type distributed: bool
    :param multiprocessing_chunk_size: Number of dicts handed to each worker process per chunk
                                       when converting input files into datasets.
    :type multiprocessing_chunk_size: int
    """
    self.distributed = distributed
    self.processor = processor
    self.data = {}
    self.batch_size = batch_size
    self.class_weights = None
    self.multiprocessing_chunk_size = multiprocessing_chunk_size
    # Upper bound on the number of worker processes used for preprocessing.
    self.max_processes = 128
    self._load_data()
|
https://github.com/deepset-ai/FARM/issues/113
|
10/11/2019 17:12:47 - INFO - farm.data_handler.data_silo - Loading dev set as a slice of train set
Traceback (most recent call last):
File ".../train.py", line 436, in <module>
augmentation=True)
File ".../train.py", line 348, in continue_finetuning
data_silo = DataSilo(processor=processor, batch_size=batch_size, multiprocessing_chunk_size=2000)
File "/.../farm/data_handler/data_silo.py", line 49, in __init__
self._load_data()
File ".../farm/data_handler/data_silo.py", line 104, in _load_data
self._create_dev_from_train()
File ".../farm/data_handler/data_silo.py", line 175, in _create_dev_from_train
train_dataset, dev_dataset = self.random_split_ConcatDataset(self.data["train"], lengths=[n_train, n_dev])
File ".../farm/data_handler/data_silo.py", line 200, in random_split_ConcatDataset
train = ConcatDataset(ds.datasets[:idx_dataset])
File ".../torch/utils/data/dataset.py", line 68, in __init__
assert len(datasets) > 0, 'datasets should not be an empty iterable'
AssertionError: datasets should not be an empty iterable
|
AssertionError
|
def _get_dataset(self, filename):
    """
    Convert a data file into a ConcatDataset using a pool of worker processes.

    Returns a tuple (ConcatDataset, tensor_names).
    """
    dicts = self.processor.file_to_dicts(filename)
    # shuffle list of dicts here if we later want to have a random dev set splitted from train set
    if self.processor.train_filename in filename:
        if not self.processor.dev_filename:
            if self.processor.dev_split > 0.0:
                random.shuffle(dicts)
    num_cpus = min(mp.cpu_count(), self.max_processes) or 1
    dicts_per_cpu = np.ceil(len(dicts) / num_cpus)
    # automatic adjustment of multiprocessing chunksize
    # for small files (containing few dicts) we want small chunksize to ulitize all available cores but never less
    # than 2, because we need it to sample another random sentence in LM finetuning
    # for large files we want to minimize processor spawning without giving too much data to one process, so we
    # clip it at 5k
    multiprocessing_chunk_size = int(
        np.clip((np.ceil(dicts_per_cpu / 5)), a_min=2, a_max=5000)
    )
    # Spawn at most as many workers as there are chunks to process.
    dict_batches_to_process = int(len(dicts) / multiprocessing_chunk_size)
    num_cpus_used = (
        min(mp.cpu_count(), self.max_processes, dict_batches_to_process) or 1
    )
    with ExitStack() as stack:
        p = stack.enter_context(mp.Pool(processes=num_cpus_used))
        logger.info(
            f"Got ya {num_cpus_used} parallel workers to convert dict chunks to datasets (chunksize = {multiprocessing_chunk_size})..."
        )
        log_ascii_workers(num_cpus_used, logger)
        # imap preserves input order, so chunk results arrive deterministically.
        results = p.imap(
            partial(self._multiproc, processor=self.processor),
            grouper(dicts, multiprocessing_chunk_size),
            chunksize=1,
        )
        datasets = []
        with tqdm(total=len(dicts), unit=" Dicts") as pbar:
            for dataset, tensor_names in results:
                datasets.append(dataset)
                pbar.update(multiprocessing_chunk_size)
        # NOTE: tensor_names is taken from the last chunk; assumes all chunks share it.
        concat_datasets = ConcatDataset(datasets)
        return concat_datasets, tensor_names
|
def _get_dataset(self, filename):
    """
    Convert a data file into a ConcatDataset using a pool of worker processes.

    :param filename: Path of the file to load and convert.
    :return: tuple (ConcatDataset, tensor_names).
    """
    dicts = self.processor.file_to_dicts(filename)
    # shuffle list of dicts here if we later want to have a random dev set splitted from train set
    if self.processor.train_filename in filename:
        if not self.processor.dev_filename:
            if self.processor.dev_split > 0.0:
                random.shuffle(dicts)
    # Adapt the chunk size to the data: a fixed, oversized chunk size on a small
    # file yields very few chunk datasets, and the dev split (which happens at
    # whole-chunk granularity) can then end up with an empty train or dev part
    # (AssertionError inside ConcatDataset). Never go below 2 because LM
    # finetuning needs a second sentence to sample from, and never exceed the
    # configured multiprocessing_chunk_size.
    num_cpus = min(mp.cpu_count(), self.max_processes) or 1
    dicts_per_cpu = np.ceil(len(dicts) / num_cpus)
    chunk_size = int(
        np.clip(
            np.ceil(dicts_per_cpu / 5),
            a_min=2,
            a_max=self.multiprocessing_chunk_size,
        )
    )
    dict_batches_to_process = int(len(dicts) / chunk_size)
    num_cpus_used = min(mp.cpu_count(), self.max_processes, dict_batches_to_process) or 1
    with ExitStack() as stack:
        p = stack.enter_context(mp.Pool(processes=num_cpus_used))
        logger.info(
            f"Got ya {num_cpus_used} parallel workers to convert dict chunks to datasets (chunksize = {chunk_size})..."
        )
        log_ascii_workers(num_cpus_used, logger)
        # imap preserves input order, so chunk results arrive deterministically.
        results = p.imap(
            partial(self._multiproc, processor=self.processor),
            grouper(dicts, chunk_size),
            chunksize=1,
        )
        datasets = []
        with tqdm(total=len(dicts), unit=" Dicts") as pbar:
            for dataset, tensor_names in results:
                datasets.append(dataset)
                pbar.update(chunk_size)
        concat_datasets = ConcatDataset(datasets)
        return concat_datasets, tensor_names
|
https://github.com/deepset-ai/FARM/issues/113
|
10/11/2019 17:12:47 - INFO - farm.data_handler.data_silo - Loading dev set as a slice of train set
Traceback (most recent call last):
File ".../train.py", line 436, in <module>
augmentation=True)
File ".../train.py", line 348, in continue_finetuning
data_silo = DataSilo(processor=processor, batch_size=batch_size, multiprocessing_chunk_size=2000)
File "/.../farm/data_handler/data_silo.py", line 49, in __init__
self._load_data()
File ".../farm/data_handler/data_silo.py", line 104, in _load_data
self._create_dev_from_train()
File ".../farm/data_handler/data_silo.py", line 175, in _create_dev_from_train
train_dataset, dev_dataset = self.random_split_ConcatDataset(self.data["train"], lengths=[n_train, n_dev])
File ".../farm/data_handler/data_silo.py", line 200, in random_split_ConcatDataset
train = ConcatDataset(ds.datasets[:idx_dataset])
File ".../torch/utils/data/dataset.py", line 68, in __init__
assert len(datasets) > 0, 'datasets should not be an empty iterable'
AssertionError: datasets should not be an empty iterable
|
AssertionError
|
def _create_dev_from_train(self):
    """Carve a dev set out of the already-loaded train set according to the processor's dev_split ratio."""
    total = len(self.data["train"])
    n_dev = int(self.processor.dev_split * total)
    n_train = total - n_dev

    train_dataset, dev_dataset = self.random_split_ConcatDataset(
        self.data["train"], lengths=[n_train, n_dev]
    )
    self.data["train"] = train_dataset

    if len(dev_dataset) == 0:
        logger.warning("No dev set created. Please adjust the dev_split parameter.")
    else:
        self.data["dev"] = dev_dataset

    logger.info(
        f"Took {len(dev_dataset)} samples out of train set to create dev set (dev split is roughly {self.processor.dev_split})"
    )
|
def _create_dev_from_train(self):
    """Carve a dev set out of the already-loaded train set according to the processor's dev_split ratio."""
    # TODO(review): add checks to ensure dev is loaded the right way.
    n_dev = int(self.processor.dev_split * len(self.data["train"]))
    n_train = len(self.data["train"]) - n_dev
    # TODO(review): make the split reproducible with a seed.
    # Splitting happens at chunk-dataset boundaries, so the resulting sizes only
    # approximate n_train / n_dev.
    train_dataset, dev_dataset = self.random_split_ConcatDataset(
        self.data["train"], lengths=[n_train, n_dev]
    )
    self.data["train"] = train_dataset
    if len(dev_dataset) > 0:
        self.data["dev"] = dev_dataset
    else:
        logger.warning(
            "No dev set created. Maybe adjust the dev_split parameter or the multiprocessing chunk size"
        )
    logger.info(
        f"Took {len(dev_dataset)} samples out of train set to create dev set (dev split is roughly {self.processor.dev_split})"
    )
|
https://github.com/deepset-ai/FARM/issues/113
|
10/11/2019 17:12:47 - INFO - farm.data_handler.data_silo - Loading dev set as a slice of train set
Traceback (most recent call last):
File ".../train.py", line 436, in <module>
augmentation=True)
File ".../train.py", line 348, in continue_finetuning
data_silo = DataSilo(processor=processor, batch_size=batch_size, multiprocessing_chunk_size=2000)
File "/.../farm/data_handler/data_silo.py", line 49, in __init__
self._load_data()
File ".../farm/data_handler/data_silo.py", line 104, in _load_data
self._create_dev_from_train()
File ".../farm/data_handler/data_silo.py", line 175, in _create_dev_from_train
train_dataset, dev_dataset = self.random_split_ConcatDataset(self.data["train"], lengths=[n_train, n_dev])
File ".../farm/data_handler/data_silo.py", line 200, in random_split_ConcatDataset
train = ConcatDataset(ds.datasets[:idx_dataset])
File ".../torch/utils/data/dataset.py", line 68, in __init__
assert len(datasets) > 0, 'datasets should not be an empty iterable'
AssertionError: datasets should not be an empty iterable
|
AssertionError
|
def random_split_ConcatDataset(self, ds, lengths):
    """
    Roughly split a Concatdataset into non-overlapping new datasets of given lengths.
    Samples inside Concatdataset should already be shuffled.

    The split happens at the boundaries of the chunk datasets inside *ds*, so the
    resulting sizes only approximate *lengths*.

    Arguments:
        ds (Dataset): Dataset to be split
        lengths (sequence): lengths of splits to be produced

    :raises ValueError: if the lengths do not sum to len(ds), or the requested
                        split would leave the first part empty.
    """
    if sum(lengths) != len(ds):
        raise ValueError(
            "Sum of input lengths does not equal the length of the input dataset!"
        )
    idx_dataset = np.where(np.array(ds.cumulative_sizes) > lengths[0])[0][0]
    # Raise a real exception instead of using `assert`: asserts are stripped when
    # Python runs with -O, and an empty first split would otherwise crash later
    # inside ConcatDataset() with an opaque AssertionError.
    if idx_dataset < 1:
        raise ValueError(
            "Dev_split ratio is too large, there is no data in train set. "
            f"Please lower dev_split = {self.processor.dev_split}"
        )
    train = ConcatDataset(ds.datasets[:idx_dataset])
    test = ConcatDataset(ds.datasets[idx_dataset:])
    return train, test
|
def random_split_ConcatDataset(self, ds, lengths):
    """
    Roughly split a Concatdataset into non-overlapping new datasets of given lengths.
    Samples inside Concatdataset should already be shuffled.

    The split happens at the boundaries of the chunk datasets inside *ds*, so the
    resulting sizes only approximate *lengths*.

    Arguments:
        ds (Dataset): Dataset to be split
        lengths (sequence): lengths of splits to be produced

    :raises ValueError: if the lengths do not sum to len(ds), or the requested
                        split would leave the first part empty.
    """
    if sum(lengths) != len(ds):
        raise ValueError(
            "Sum of input lengths does not equal the length of the input dataset!"
        )
    idx_dataset = np.where(np.array(ds.cumulative_sizes) > lengths[0])[0][0]
    # Guard against an empty first split: without this check, ConcatDataset([])
    # below fails with "AssertionError: datasets should not be an empty iterable",
    # which hides the real cause (a too-large dev_split for the chunk granularity).
    if idx_dataset < 1:
        raise ValueError(
            "Dev_split ratio is too large, there is no data in train set. "
            f"Please lower dev_split = {self.processor.dev_split}"
        )
    train = ConcatDataset(ds.datasets[:idx_dataset])
    test = ConcatDataset(ds.datasets[idx_dataset:])
    return train, test
|
https://github.com/deepset-ai/FARM/issues/113
|
10/11/2019 17:12:47 - INFO - farm.data_handler.data_silo - Loading dev set as a slice of train set
Traceback (most recent call last):
File ".../train.py", line 436, in <module>
augmentation=True)
File ".../train.py", line 348, in continue_finetuning
data_silo = DataSilo(processor=processor, batch_size=batch_size, multiprocessing_chunk_size=2000)
File "/.../farm/data_handler/data_silo.py", line 49, in __init__
self._load_data()
File ".../farm/data_handler/data_silo.py", line 104, in _load_data
self._create_dev_from_train()
File ".../farm/data_handler/data_silo.py", line 175, in _create_dev_from_train
train_dataset, dev_dataset = self.random_split_ConcatDataset(self.data["train"], lengths=[n_train, n_dev])
File ".../farm/data_handler/data_silo.py", line 200, in random_split_ConcatDataset
train = ConcatDataset(ds.datasets[:idx_dataset])
File ".../torch/utils/data/dataset.py", line 68, in __init__
assert len(datasets) > 0, 'datasets should not be an empty iterable'
AssertionError: datasets should not be an empty iterable
|
AssertionError
|
def _dict_to_samples(self, dictionary, all_dicts=None):
    """
    Build one Sample per adjacent sentence pair in the document.

    Each pair consists of a sentence and either its true successor or a randomly
    drawn sentence (decided by get_sentence_pair), together with the
    next-sentence label.
    """
    assert len(all_dicts) > 1, (
        "Need at least 2 documents to sample random sentences from"
    )
    doc = dictionary["doc"]
    samples = []
    for sent_idx in range(len(doc) - 1):
        text_a, text_b, is_next_label = get_sentence_pair(doc, all_dicts, sent_idx)
        clear_text = {
            "text_a": text_a,
            "text_b": text_b,
            "nextsentence_label": is_next_label,
        }
        tokenized = {
            "text_a": tokenize_with_metadata(text_a, self.tokenizer, self.max_seq_len),
            "text_b": tokenize_with_metadata(text_b, self.tokenizer, self.max_seq_len),
        }
        samples.append(Sample(id=None, clear_text=clear_text, tokenized=tokenized))
    return samples
|
def _dict_to_samples(self, dictionary, all_dicts=None):
    """
    Build one Sample per adjacent sentence pair in the document, pairing each
    sentence with either its true successor or a randomly drawn sentence
    (decided by get_sentence_pair) plus the next-sentence label.
    """
    # get_sentence_pair() draws its negative "next sentence" from a *different*
    # document, so at least two documents must be available; failing early here
    # gives a clear message instead of an obscure error deeper in the pipeline.
    assert len(all_dicts) > 1, (
        "Need at least 2 documents to sample random sentences from"
    )
    doc = dictionary["doc"]
    samples = []
    for idx in range(len(doc) - 1):
        text_a, text_b, is_next_label = get_sentence_pair(doc, all_dicts, idx)
        sample_in_clear_text = {
            "text_a": text_a,
            "text_b": text_b,
            "nextsentence_label": is_next_label,
        }
        tokenized = {}
        tokenized["text_a"] = tokenize_with_metadata(
            text_a, self.tokenizer, self.max_seq_len
        )
        tokenized["text_b"] = tokenize_with_metadata(
            text_b, self.tokenizer, self.max_seq_len
        )
        samples.append(
            Sample(id=None, clear_text=sample_in_clear_text, tokenized=tokenized)
        )
    return samples
|
https://github.com/deepset-ai/FARM/issues/113
|
10/11/2019 17:12:47 - INFO - farm.data_handler.data_silo - Loading dev set as a slice of train set
Traceback (most recent call last):
File ".../train.py", line 436, in <module>
augmentation=True)
File ".../train.py", line 348, in continue_finetuning
data_silo = DataSilo(processor=processor, batch_size=batch_size, multiprocessing_chunk_size=2000)
File "/.../farm/data_handler/data_silo.py", line 49, in __init__
self._load_data()
File ".../farm/data_handler/data_silo.py", line 104, in _load_data
self._create_dev_from_train()
File ".../farm/data_handler/data_silo.py", line 175, in _create_dev_from_train
train_dataset, dev_dataset = self.random_split_ConcatDataset(self.data["train"], lengths=[n_train, n_dev])
File ".../farm/data_handler/data_silo.py", line 200, in random_split_ConcatDataset
train = ConcatDataset(ds.datasets[:idx_dataset])
File ".../torch/utils/data/dataset.py", line 68, in __init__
assert len(datasets) > 0, 'datasets should not be an empty iterable'
AssertionError: datasets should not be an empty iterable
|
AssertionError
|
def eval(self, model):
    """
    Performs evaluation on a given model.

    :param model: The model on which to perform evaluation
    :type model: AdaptiveModel
    :return all_results: A list of dictionaries, one for each prediction head. Each dictionary contains the metrics
                         and reports generated during evaluation.
    :rtype all_results: list of dicts
    """
    model.eval()
    # init empty lists per prediction head
    loss_all = [0 for _ in model.prediction_heads]
    preds_all = [[] for _ in model.prediction_heads]
    label_all = [[] for _ in model.prediction_heads]
    for step, batch in enumerate(
        tqdm(self.data_loader, desc="Evaluating", mininterval=10)
    ):
        # Move every tensor of the batch to the evaluation device.
        batch = {key: batch[key].to(self.device) for key in batch}
        with torch.no_grad():
            logits = model.forward(**batch)
            # TODO logits_to_loss should be a single, overloaded function
            losses_per_head = model.logits_to_loss_per_head(logits=logits, **batch)
            preds = model.logits_to_preds(logits=logits, **batch)
            labels = model.prepare_labels(**batch)
        # stack results of all batches per prediction head
        for head_num, head in enumerate(model.prediction_heads):
            loss_all[head_num] += np.sum(to_numpy(losses_per_head[head_num]))
            preds_all[head_num] += list(to_numpy(preds[head_num]))
            label_all[head_num] += list(to_numpy(labels[head_num]))
    # Evaluate per prediction head
    all_results = []
    for head_num, head in enumerate(model.prediction_heads):
        if head.model_type == "multilabel_text_classification":
            # converting from string preds back to multi-hot encoding
            from sklearn.preprocessing import MultiLabelBinarizer
            mlb = MultiLabelBinarizer(classes=head.label_list)
            # TODO check why .fit() should be called on predictions, rather than on labels
            preds_all[head_num] = mlb.fit_transform(preds_all[head_num])
            label_all[head_num] = mlb.transform(label_all[head_num])
        result = {
            "loss": loss_all[head_num] / len(self.data_loader.dataset),
            "task_name": head.task_name,
        }
        result.update(
            compute_metrics(
                metric=head.metric,
                preds=preds_all[head_num],
                labels=label_all[head_num],
            )
        )
        # Select type of report depending on prediction head output type
        if self.classification_report:
            if head.ph_output_type == "per_token":
                report_fn = token_classification_report
            elif head.ph_output_type == "per_sequence":
                report_fn = classification_report
            elif head.ph_output_type == "per_token_squad":
                report_fn = lambda *args, **kwargs: "not Implemented"
            elif head.ph_output_type == "per_sequence_continuous":
                report_fn = r2_score
            else:
                raise NotImplementedError
            # CHANGE PARAMETERS, not all report_fn accept digits
            if head.ph_output_type in ["per_sequence_continuous", "per_token"]:
                # r2_score and token_classification_report only take (labels, preds).
                result["report"] = report_fn(label_all[head_num], preds_all[head_num])
            else:
                # supply labels as all possible combination because if ground truth labels do not cover
                # all values in label_list (maybe dev set is small), the report will break
                result["report"] = report_fn(
                    label_all[head_num],
                    preds_all[head_num],
                    digits=4,
                    labels=head.label_list,
                    target_names=head.label_list,
                )
        all_results.append(result)
    return all_results
|
def eval(self, model):
    """
    Performs evaluation on a given model.

    :param model: The model on which to perform evaluation
    :type model: AdaptiveModel
    :return all_results: A list of dictionaries, one for each prediction head. Each dictionary contains the metrics
                         and reports generated during evaluation.
    :rtype all_results: list of dicts
    """
    model.eval()
    # init empty lists per prediction head
    loss_all = [0 for _ in model.prediction_heads]
    preds_all = [[] for _ in model.prediction_heads]
    label_all = [[] for _ in model.prediction_heads]
    for step, batch in enumerate(
        tqdm(self.data_loader, desc="Evaluating", mininterval=10)
    ):
        # Move every tensor of the batch to the evaluation device.
        batch = {key: batch[key].to(self.device) for key in batch}
        with torch.no_grad():
            logits = model.forward(**batch)
            # TODO logits_to_loss should be a single, overloaded function
            losses_per_head = model.logits_to_loss_per_head(logits=logits, **batch)
            preds = model.logits_to_preds(logits=logits, **batch)
            labels = model.prepare_labels(**batch)
        # stack results of all batches per prediction head
        for head_num, head in enumerate(model.prediction_heads):
            loss_all[head_num] += np.sum(to_numpy(losses_per_head[head_num]))
            preds_all[head_num] += list(to_numpy(preds[head_num]))
            label_all[head_num] += list(to_numpy(labels[head_num]))
    # Evaluate per prediction head
    all_results = []
    for head_num, head in enumerate(model.prediction_heads):
        if head.model_type == "multilabel_text_classification":
            # converting from string preds back to multi-hot encoding
            from sklearn.preprocessing import MultiLabelBinarizer
            mlb = MultiLabelBinarizer(classes=head.label_list)
            # TODO check why .fit() should be called on predictions, rather than on labels
            preds_all[head_num] = mlb.fit_transform(preds_all[head_num])
            label_all[head_num] = mlb.transform(label_all[head_num])
        result = {
            "loss": loss_all[head_num] / len(self.data_loader.dataset),
            "task_name": head.task_name,
        }
        result.update(
            compute_metrics(
                metric=head.metric,
                preds=preds_all[head_num],
                labels=label_all[head_num],
            )
        )
        # Select type of report depending on prediction head output type
        if self.classification_report:
            if head.ph_output_type == "per_token":
                report_fn = token_classification_report
            elif head.ph_output_type == "per_sequence":
                report_fn = classification_report
            elif head.ph_output_type == "per_token_squad":
                report_fn = lambda *args, **kwargs: "not Implemented"
            elif head.ph_output_type == "per_sequence_continuous":
                report_fn = r2_score
            else:
                raise NotImplementedError
            # Not all report functions accept the same kwargs: r2_score and
            # token_classification_report only take (labels, preds), so both
            # duplicated branches are merged into a single membership test.
            if head.ph_output_type in ["per_sequence_continuous", "per_token"]:
                result["report"] = report_fn(label_all[head_num], preds_all[head_num])
            else:
                # Pass `labels` explicitly alongside target_names: if the ground
                # truth of a small dev set does not cover every entry of
                # head.label_list, sklearn's classification_report would
                # otherwise fail to line up target_names with observed labels.
                result["report"] = report_fn(
                    label_all[head_num],
                    preds_all[head_num],
                    digits=4,
                    labels=head.label_list,
                    target_names=head.label_list,
                )
        all_results.append(result)
    return all_results
|
https://github.com/deepset-ai/FARM/issues/113
|
10/11/2019 17:12:47 - INFO - farm.data_handler.data_silo - Loading dev set as a slice of train set
Traceback (most recent call last):
File ".../train.py", line 436, in <module>
augmentation=True)
File ".../train.py", line 348, in continue_finetuning
data_silo = DataSilo(processor=processor, batch_size=batch_size, multiprocessing_chunk_size=2000)
File "/.../farm/data_handler/data_silo.py", line 49, in __init__
self._load_data()
File ".../farm/data_handler/data_silo.py", line 104, in _load_data
self._create_dev_from_train()
File ".../farm/data_handler/data_silo.py", line 175, in _create_dev_from_train
train_dataset, dev_dataset = self.random_split_ConcatDataset(self.data["train"], lengths=[n_train, n_dev])
File ".../farm/data_handler/data_silo.py", line 200, in random_split_ConcatDataset
train = ConcatDataset(ds.datasets[:idx_dataset])
File ".../torch/utils/data/dataset.py", line 68, in __init__
assert len(datasets) > 0, 'datasets should not be an empty iterable'
AssertionError: datasets should not be an empty iterable
|
AssertionError
|
def __init__(
    self, model, processor, batch_size=4, gpu=False, name=None, return_class_probs=False
):
    """
    Initializes Inferencer from an AdaptiveModel and a Processor instance.

    :param model: AdaptiveModel to run in inference mode
    :type model: AdaptiveModel
    :param processor: A dataset specific Processor object which will turn input (file or dict) into a Pytorch Dataset.
    :type processor: Processor
    :param batch_size: Number of samples computed once per batch
    :type batch_size: int
    :param gpu: If GPU shall be used
    :type gpu: bool
    :param name: Name for the current Inferencer model, displayed in the REST API
    :type name: string
    :param return_class_probs: either return probability distribution over all labels or the prob of the associated label
    :type return_class_probs: bool
    :return: An instance of the Inferencer.
    """
    # Init device and distributed settings
    device, n_gpu = initialize_device_settings(use_cuda=gpu, local_rank=-1, fp16=False)
    self.processor = processor
    self.model = model
    self.model.eval()
    self.batch_size = batch_size
    self.device = device
    self.language = self.model.language_model.language
    # TODO adjust for multiple prediction heads
    if len(self.model.prediction_heads) == 1:
        self.prediction_type = self.model.prediction_heads[0].model_type
        # NOTE(review): label-map lookup was disabled here; task info now comes
        # from the processor via connect_heads_with_processor() below.
        # self.label_map = self.processor.label_maps[0]
    elif len(self.model.prediction_heads) == 0:
        # No heads at all: run the language model as a plain embedder.
        self.prediction_type = "embedder"
    # else:
    #     raise NotImplementedError("A model with multiple prediction heads is currently not supported by the Inferencer")
    self.name = name if name != None else f"anonymous-{self.prediction_type}"
    self.return_class_probs = return_class_probs
    # Labels are not required at inference time.
    model.connect_heads_with_processor(processor.tasks, require_labels=False)
    set_all_seeds(42, n_gpu)
|
def __init__(
    self,
    model,
    processor,
    batch_size=4,
    gpu=False,
    name=None,
    return_class_probs=False,
    multiprocessing_chunk_size=100,
):
    """
    Initializes Inferencer from an AdaptiveModel and a Processor instance.

    :param model: AdaptiveModel to run in inference mode
    :type model: AdaptiveModel
    :param processor: A dataset specific Processor object which will turn input (file or dict) into a Pytorch Dataset.
    :type processor: Processor
    :param batch_size: Number of samples computed once per batch
    :type batch_size: int
    :param gpu: If GPU shall be used
    :type gpu: bool
    :param name: Name for the current Inferencer model, displayed in the REST API
    :type name: string
    :param return_class_probs: either return probability distribution over all labels or the prob of the associated label
    :type return_class_probs: bool
    :param multiprocessing_chunk_size: chunksize param for Python Multiprocessing imap().
    :type multiprocessing_chunk_size: int
    :return: An instance of the Inferencer.
    """
    # Init device and distributed settings
    device, n_gpu = initialize_device_settings(use_cuda=gpu, local_rank=-1, fp16=False)

    self.processor = processor
    self.model = model
    self.model.eval()  # inference mode: disables dropout etc.
    self.batch_size = batch_size
    self.device = device
    self.language = self.model.language_model.language

    # TODO adjust for multiple prediction heads
    if len(self.model.prediction_heads) == 1:
        self.prediction_type = self.model.prediction_heads[0].model_type
    elif len(self.model.prediction_heads) == 0:
        # No head: the model can only be used for embedding extraction.
        self.prediction_type = "embedder"
    # NOTE(review): with more than one prediction head, self.prediction_type is
    # never set, so the fallback name below would raise AttributeError — confirm
    # whether multi-head models are supposed to reach this constructor.

    # Idiom fix: compare against None with identity, not `!=`.
    self.name = name if name is not None else f"anonymous-{self.prediction_type}"
    self.return_class_probs = return_class_probs
    self.multiprocessing_chunk_size = multiprocessing_chunk_size

    model.connect_heads_with_processor(processor.tasks, require_labels=False)
    set_all_seeds(42, n_gpu)
|
https://github.com/deepset-ai/FARM/issues/113
|
10/11/2019 17:12:47 - INFO - farm.data_handler.data_silo - Loading dev set as a slice of train set
Traceback (most recent call last):
File ".../train.py", line 436, in <module>
augmentation=True)
File ".../train.py", line 348, in continue_finetuning
data_silo = DataSilo(processor=processor, batch_size=batch_size, multiprocessing_chunk_size=2000)
File "/.../farm/data_handler/data_silo.py", line 49, in __init__
self._load_data()
File ".../farm/data_handler/data_silo.py", line 104, in _load_data
self._create_dev_from_train()
File ".../farm/data_handler/data_silo.py", line 175, in _create_dev_from_train
train_dataset, dev_dataset = self.random_split_ConcatDataset(self.data["train"], lengths=[n_train, n_dev])
File ".../farm/data_handler/data_silo.py", line 200, in random_split_ConcatDataset
train = ConcatDataset(ds.datasets[:idx_dataset])
File ".../torch/utils/data/dataset.py", line 68, in __init__
assert len(datasets) > 0, 'datasets should not be an empty iterable'
AssertionError: datasets should not be an empty iterable
|
AssertionError
|
def load(
    cls,
    load_dir,
    batch_size=4,
    gpu=False,
    embedder_only=False,
    return_class_probs=False,
):
    """
    Initializes Inferencer from directory with saved model.

    :param load_dir: Directory where the saved model is located.
    :type load_dir: str
    :param batch_size: Number of samples computed once per batch
    :type batch_size: int
    :param gpu: If GPU shall be used
    :type gpu: bool
    :param embedder_only: If true, a faster processor (InferenceProcessor) is loaded. This should only be used
                          for extracting embeddings (no downstream predictions).
    :type embedder_only: bool
    :return: An instance of the Inferencer.
    """
    device, _n_gpu = initialize_device_settings(use_cuda=gpu, local_rank=-1, fp16=False)
    model = AdaptiveModel.load(load_dir, device)

    # Pick the processor class up front: the lightweight InferenceProcessor when
    # only embeddings are needed, the full task Processor otherwise.
    processor_cls = InferenceProcessor if embedder_only else Processor
    processor = processor_cls.load_from_dir(load_dir)

    return cls(
        model,
        processor,
        batch_size=batch_size,
        gpu=gpu,
        name=os.path.basename(load_dir),
        return_class_probs=return_class_probs,
    )
|
def load(
    cls,
    load_dir,
    batch_size=4,
    gpu=False,
    embedder_only=False,
    return_class_probs=False,
    multiprocessing_chunk_size=100,
):
    """
    Initializes Inferencer from directory with saved model.

    :param load_dir: Directory where the saved model is located.
    :type load_dir: str
    :param batch_size: Number of samples computed once per batch
    :type batch_size: int
    :param gpu: If GPU shall be used
    :type gpu: bool
    :param embedder_only: If true, a faster processor (InferenceProcessor) is loaded. This should only be used
                          for extracting embeddings (no downstream predictions).
    :type embedder_only: bool
    :param multiprocessing_chunk_size: chunksize param for Python Multiprocessing imap().
    :type multiprocessing_chunk_size: int
    :return: An instance of the Inferencer.
    """
    device, _n_gpu = initialize_device_settings(use_cuda=gpu, local_rank=-1, fp16=False)
    model = AdaptiveModel.load(load_dir, device)

    # Pick the processor class up front: the lightweight InferenceProcessor when
    # only embeddings are needed, the full task Processor otherwise.
    processor_cls = InferenceProcessor if embedder_only else Processor
    processor = processor_cls.load_from_dir(load_dir)

    return cls(
        model,
        processor,
        batch_size=batch_size,
        gpu=gpu,
        name=os.path.basename(load_dir),
        return_class_probs=return_class_probs,
        multiprocessing_chunk_size=multiprocessing_chunk_size,
    )
|
https://github.com/deepset-ai/FARM/issues/113
|
10/11/2019 17:12:47 - INFO - farm.data_handler.data_silo - Loading dev set as a slice of train set
Traceback (most recent call last):
File ".../train.py", line 436, in <module>
augmentation=True)
File ".../train.py", line 348, in continue_finetuning
data_silo = DataSilo(processor=processor, batch_size=batch_size, multiprocessing_chunk_size=2000)
File "/.../farm/data_handler/data_silo.py", line 49, in __init__
self._load_data()
File ".../farm/data_handler/data_silo.py", line 104, in _load_data
self._create_dev_from_train()
File ".../farm/data_handler/data_silo.py", line 175, in _create_dev_from_train
train_dataset, dev_dataset = self.random_split_ConcatDataset(self.data["train"], lengths=[n_train, n_dev])
File ".../farm/data_handler/data_silo.py", line 200, in random_split_ConcatDataset
train = ConcatDataset(ds.datasets[:idx_dataset])
File ".../torch/utils/data/dataset.py", line 68, in __init__
assert len(datasets) > 0, 'datasets should not be an empty iterable'
AssertionError: datasets should not be an empty iterable
|
AssertionError
|
def inference_from_dicts(self, dicts, rest_api_schema=False):
    """
    Runs down-stream inference using the prediction head.

    :param dicts: Samples to run inference on provided as a list of dicts. One dict per sample.
    :type dicts: [dict]
    :param rest_api_schema: whether conform to the schema used for dicts in the HTTP API for Inference.
    :type rest_api_schema: bool
    :return: dict of predictions
    """
    if self.prediction_type == "embedder":
        raise TypeError(
            "You have called inference_from_dicts for a model without any prediction head! "
            "If you want to: "
            "a) ... extract vectors from the language model: call `Inferencer.extract_vectors(...)`"
            f"b) ... run inference on a downstream task: make sure your model path {self.name} contains a saved prediction head"
        )

    # Dicts each CPU would get if the work were split evenly across all cores.
    num_cpus = mp.cpu_count() or 1
    dicts_per_cpu = np.ceil(len(dicts) / num_cpus)
    # automatic adjustment of multiprocessing chunksize
    # for small files (containing few dicts) we want small chunksize to ulitize all available cores but never less
    # than 2, because we need it to sample another random sentence in LM finetuning
    # for large files we want to minimize processor spawning without giving too much data to one process, so we
    # clip it at 5k
    multiprocessing_chunk_size = int(
        np.clip((np.ceil(dicts_per_cpu / 5)), a_min=2, a_max=5000)
    )
    # Never spawn more workers than there are chunks; `or 1` guards the 0-chunk case.
    dict_batches_to_process = int(len(dicts) / multiprocessing_chunk_size)
    num_cpus_used = min(mp.cpu_count(), dict_batches_to_process) or 1

    with ExitStack() as stack:
        p = stack.enter_context(mp.Pool(processes=num_cpus_used))

        logger.info(
            f"Got ya {num_cpus_used} parallel workers to do inference on {len(dicts)}dicts (chunksize = {multiprocessing_chunk_size})..."
        )
        log_ascii_workers(num_cpus_used, logger)

        # Workers preprocess chunks of dicts into datasets; the model forward
        # pass (_run_inference) stays in this parent process.
        results = p.imap(
            partial(
                self._multiproc,
                processor=self.processor,
                rest_api_schema=rest_api_schema,
            ),
            grouper(dicts, multiprocessing_chunk_size),
            1,
        )

        preds_all = []
        with tqdm(total=len(dicts), unit=" Dicts") as pbar:
            for dataset, tensor_names, sample in results:
                preds_all.extend(self._run_inference(dataset, tensor_names, sample))
                # NOTE(review): the last chunk may be smaller than chunk size,
                # so the bar can overshoot slightly — cosmetic only.
                pbar.update(multiprocessing_chunk_size)

    return preds_all
|
def inference_from_dicts(self, dicts, rest_api_schema=False):
    """
    Runs down-stream inference using the prediction head.

    :param dicts: Samples to run inference on provided as a list of dicts. One dict per sample.
    :type dicts: [dict]
    :param rest_api_schema: whether conform to the schema used for dicts in the HTTP API for Inference.
    :type rest_api_schema: bool
    :return: dict of predictions
    """
    if self.prediction_type == "embedder":
        raise TypeError(
            "You have called inference_from_dicts for a model without any prediction head! "
            "If you want to: "
            "a) ... extract vectors from the language model: call `Inferencer.extract_vectors(...)`"
            f"b) ... run inference on a downstream task: make sure your model path {self.name} contains a saved prediction head"
        )

    # Never spawn more workers than there are chunks; `or 1` guards the 0-chunk case.
    dict_batches_to_process = int(len(dicts) / self.multiprocessing_chunk_size)
    num_cpus = min(mp.cpu_count(), dict_batches_to_process) or 1

    with ExitStack() as stack:
        p = stack.enter_context(mp.Pool(processes=num_cpus))

        logger.info(
            f"Got ya {num_cpus} parallel workers to do inference on {len(dicts)}dicts (chunksize = {self.multiprocessing_chunk_size})..."
        )
        log_ascii_workers(num_cpus, logger)

        # Workers preprocess chunks of dicts into datasets; the model forward
        # pass (_run_inference) stays in this parent process.
        results = p.imap(
            partial(
                self._multiproc,
                processor=self.processor,
                rest_api_schema=rest_api_schema,
            ),
            grouper(dicts, self.multiprocessing_chunk_size),
            1,
        )

        preds_all = []
        with tqdm(total=len(dicts), unit=" Dicts") as pbar:
            for dataset, tensor_names, sample in results:
                preds_all.extend(self._run_inference(dataset, tensor_names, sample))
                # NOTE(review): the last chunk may be smaller than chunk size,
                # so the bar can overshoot slightly — cosmetic only.
                pbar.update(self.multiprocessing_chunk_size)

    return preds_all
|
https://github.com/deepset-ai/FARM/issues/113
|
10/11/2019 17:12:47 - INFO - farm.data_handler.data_silo - Loading dev set as a slice of train set
Traceback (most recent call last):
File ".../train.py", line 436, in <module>
augmentation=True)
File ".../train.py", line 348, in continue_finetuning
data_silo = DataSilo(processor=processor, batch_size=batch_size, multiprocessing_chunk_size=2000)
File "/.../farm/data_handler/data_silo.py", line 49, in __init__
self._load_data()
File ".../farm/data_handler/data_silo.py", line 104, in _load_data
self._create_dev_from_train()
File ".../farm/data_handler/data_silo.py", line 175, in _create_dev_from_train
train_dataset, dev_dataset = self.random_split_ConcatDataset(self.data["train"], lengths=[n_train, n_dev])
File ".../farm/data_handler/data_silo.py", line 200, in random_split_ConcatDataset
train = ConcatDataset(ds.datasets[:idx_dataset])
File ".../torch/utils/data/dataset.py", line 68, in __init__
assert len(datasets) > 0, 'datasets should not be an empty iterable'
AssertionError: datasets should not be an empty iterable
|
AssertionError
|
def __init__(
    self,
    tokenizer,
    max_seq_len,
    label_list,
    metrics,
    train_filename,
    dev_filename,
    test_filename,
    dev_split,
    data_dir,
    label_dtype=torch.long,
    multiprocessing_chunk_size=1_000,
    max_processes=128,
    share_all_baskets_for_multiprocessing=False,
    use_multiprocessing=True,
):
    """
    Initialize a generic Processor

    :param tokenizer: Used to split a sentence (str) into tokens.
    :param max_seq_len: Samples are truncated after this many tokens.
    :type max_seq_len: int
    :param label_list: List of all unique target labels.
    :type label_list: list
    :param metrics: The metric used for evaluation, one per prediction head.
                    Choose from mcc, acc, acc_f1, pear_spear, seq_f1, f1_macro, squad.
    :type metrics: list or str
    :param train_filename: The name of the file containing training data.
    :type train_filename: str
    :param dev_filename: The name of the file containing the dev data. If None and 0.0 < dev_split < 1.0 the dev set
                         will be a slice of the train set.
    :type dev_filename: str or None
    :param test_filename: The name of the file containing test data.
    :type test_filename: str
    :param dev_split: The proportion of the train set that will sliced. Only works if dev_filename is set to None
    :type dev_split: float
    :param data_dir: The directory in which the train, test and perhaps dev files can be found.
    :type data_dir: str
    :param label_dtype: The torch dtype for the labels.
    :param multiprocessing_chunk_size: Number of baskets handed to one worker at a time.
    :type multiprocessing_chunk_size: int
    :param max_processes: maximum number of processes to use for Multiprocessing.
    :type max_processes: int
    :param share_all_baskets_for_multiprocessing: If True, expose all raw dicts to every worker via a Manager list.
    :type share_all_baskets_for_multiprocessing: bool
    :param use_multiprocessing: Whether to parallelize preprocessing (forced off on Windows).
    :type use_multiprocessing: bool
    """
    # The Multiprocessing functions in the Class are classmethods to avoid passing (and pickling) of class-objects
    # that are very large in size (e.g. self.baskets). Since classmethods have access to only class attributes, all
    # objects required in Multiprocessing must be set as class attributes.
    Processor.tokenizer = tokenizer
    Processor.max_seq_len = max_seq_len
    Processor.label_list = label_list

    # data sets
    self.train_filename = train_filename
    self.dev_filename = dev_filename
    self.test_filename = test_filename
    self.dev_split = dev_split
    self.data_dir = data_dir

    # labels
    self.label_dtype = label_dtype
    self.label_maps = []

    # multiprocessing: the mp code in this class isn't compatible with Windows,
    # so force single-process mode there regardless of the caller's choice.
    self.use_multiprocessing = use_multiprocessing if os.name != "nt" else False
    self.multiprocessing_chunk_size = multiprocessing_chunk_size
    self.share_all_baskets_for_multiprocessing = share_all_baskets_for_multiprocessing
    self.max_processes = max_processes

    # others
    self.metrics = [metrics] if isinstance(metrics, str) else metrics

    # Create label maps (one per prediction head). A nested label_list means one
    # sub-list per head. dict(enumerate(...)) avoids shadowing the builtin `map`.
    if any(isinstance(i, list) for i in label_list):
        for labels_per_head in label_list:
            self.label_maps.append(dict(enumerate(labels_per_head)))
    else:
        self.label_maps.append(dict(enumerate(label_list)))

    self.baskets = []
    self._log_params()
|
def __init__(
    self,
    tokenizer,
    max_seq_len,
    label_list,
    metrics,
    train_filename,
    dev_filename,
    test_filename,
    dev_split,
    data_dir,
    label_dtype=torch.long,
    multiprocessing_chunk_size=1_000,
    max_processes=128,
    share_all_baskets_for_multiprocessing=False,
):
    """
    Initialize a generic Processor

    :param tokenizer: Used to split a sentence (str) into tokens.
    :param max_seq_len: Samples are truncated after this many tokens.
    :type max_seq_len: int
    :param label_list: List of all unique target labels.
    :type label_list: list
    :param metrics: The metric used for evaluation, one per prediction head.
                    Choose from mcc, acc, acc_f1, pear_spear, seq_f1, f1_macro, squad.
    :type metrics: list or str
    :param train_filename: The name of the file containing training data.
    :type train_filename: str
    :param dev_filename: The name of the file containing the dev data. If None and 0.0 < dev_split < 1.0 the dev set
                         will be a slice of the train set.
    :type dev_filename: str or None
    :param test_filename: The name of the file containing test data.
    :type test_filename: str
    :param dev_split: The proportion of the train set that will sliced. Only works if dev_filename is set to None
    :type dev_split: float
    :param data_dir: The directory in which the train, test and perhaps dev files can be found.
    :type data_dir: str
    :param label_dtype: The torch dtype for the labels.
    :param multiprocessing_chunk_size: Number of baskets handed to one worker at a time.
    :type multiprocessing_chunk_size: int
    :param max_processes: maximum number of processing to use for Multiprocessing.
    :type max_processes: int
    :param share_all_baskets_for_multiprocessing: If True, expose all raw dicts to every worker via a Manager list.
    :type share_all_baskets_for_multiprocessing: bool
    """
    # The Multiprocessing functions in the Class are classmethods to avoid passing(and pickling) of class-objects
    # that are very large in size(eg, self.baskets). Since classmethods have access to only class attributes, all
    # objects required in Multiprocessing must be set as class attributes.
    Processor.tokenizer = tokenizer
    Processor.max_seq_len = max_seq_len
    Processor.label_list = label_list

    # data sets
    self.train_filename = train_filename
    self.dev_filename = dev_filename
    self.test_filename = test_filename
    self.dev_split = dev_split
    self.data_dir = data_dir

    # labels
    self.label_dtype = label_dtype
    self.label_maps = []

    # multiprocessing
    self.multiprocessing_chunk_size = multiprocessing_chunk_size
    self.share_all_baskets_for_multiprocessing = share_all_baskets_for_multiprocessing
    self.max_processes = max_processes

    # others
    self.metrics = [metrics] if isinstance(metrics, str) else metrics

    # create label maps (one per prediction head); a nested label_list means
    # one sub-list of labels per head
    if any(isinstance(i, list) for i in label_list):
        for labels_per_head in label_list:
            # NOTE(review): local name `map` shadows the builtin
            map = {i: label for i, label in enumerate(labels_per_head)}
            self.label_maps.append(map)
    else:
        map = {i: label for i, label in enumerate(label_list)}
        self.label_maps.append(map)

    self.baskets = []
    self._log_params()
|
https://github.com/deepset-ai/FARM/issues/70
|
08/28/2019 07:47:35 - INFO - farm.utils - device: cuda n_gpu: 1, distributed training: False, 16-bits training: False
08/28/2019 07:47:35 - INFO - pytorch_transformers.tokenization_utils - loading file https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-vocab.txt from cache at C:\Users\JulianGerhard\.cache\torch\pytorch_transformers\da299cdd121a3d71e1626f2908dda0d02658f42e925a3d6abd8273ec08cf41a6.2a48e6c60dcdb582effb718237ce5894652e3b4abb94f0a4d9a857b70333308d
08/28/2019 07:47:35 - INFO - farm.data_handler.data_silo -
Loading data into the data silo ...
______
|o | !
__ |:`_|---'-.
|__|______.-/ _ \-----.|
(o)(o)------'\ _ / ( )
08/28/2019 07:47:35 - INFO - farm.data_handler.data_silo - Loading train set from: data/conll03-de\train.txt
08/28/2019 07:47:35 - INFO - farm.data_handler.utils - Couldn't find data/conll03-de\train.txt locally. Trying to download ...
08/28/2019 07:47:35 - INFO - farm.data_handler.utils - downloading and extracting file C:\Users\JulianGerhard\PycharmProjects\word_embeddings\ner_finetuning\data\conll03-de to dir
08/28/2019 07:47:39 - INFO - farm.data_handler.processor - Got ya 8 parallel workers to fill the baskets with samples (chunksize = 1000)...
0%| | 0/24000 [00:00<?, ?it/s]multiprocessing.pool.RemoteTraceback:
"""
Traceback (most recent call last):
File "C:\Users\JulianGerhard\AppData\Local\Programs\Python\Python36\lib\multiprocessing\pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "C:\Users\JulianGerhard\AppData\Local\Programs\Python\Python36\lib\multiprocessing\pool.py", line 44, in mapstar
return list(map(*args))
File "c:\users\juliangerhard\pycharmprojects\farm\farm\data_handler\processor.py", line 310, in _multiproc_sample
samples = cls._dict_to_samples(dict=basket.raw, all_dicts=all_dicts)
File "c:\users\juliangerhard\pycharmprojects\farm\farm\data_handler\processor.py", line 555, in _dict_to_samples
tokenized = tokenize_with_metadata(dict["text"], cls.tokenizer, cls.max_seq_len)
AttributeError: type object 'NERProcessor' has no attribute 'tokenizer'
"""
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "C:/Users/JulianGerhard/PycharmProjects/word_embeddings/ner_finetuning/farm_experimen t.py", line 6, in <module>
run_experiment(experiments[0])
File "c:\users\juliangerhard\pycharmprojects\farm\farm\experiment.py", line 87, in run_experiment
distributed=distributed,
File "c:\users\juliangerhard\pycharmprojects\farm\farm\data_handler\data_silo.py", line 39, in __init__
self._load_data()
File "c:\users\juliangerhard\pycharmprojects\farm\farm\data_handler\data_silo.py", line 47, in _load_data
self.data["train"], self.tensor_names = self.processor.dataset_from_file(train_file)
File "c:\users\juliangerhard\pycharmprojects\farm\farm\data_handler\processor.py", line 366, in dataset_from_file
self._init_samples_in_baskets()
File "c:\users\juliangerhard\pycharmprojects\farm\farm\data_handler\processor.py", line 304, in _init_samples_in_baskets
zip(samples, self.baskets), total=len(self.baskets)
File "C:\Users\JulianGerhard\AppData\Local\Programs\Python\Python36\lib\site-packages\tqdm\_tqdm.py", line 1034, in __iter__
for obj in iterable:
File "C:\Users\JulianGerhard\AppData\Local\Programs\Python\Python36\lib\multiprocessing\pool.py", line 320, in <genexpr>
return (item for chunk in result for item in chunk)
File "C:\Users\JulianGerhard\AppData\Local\Programs\Python\Python36\lib\multiprocessing\pool.py", line 735, in next
raise value
AttributeError: type object 'NERProcessor' has no attribute 'tokenizer'
0%| | 0/24000 [00:06<?, ?it/s]
|
AttributeError
|
def _init_samples_in_baskets(self):
    """
    Turn each basket's raw dict into Sample objects (stored on basket.samples),
    optionally fanning the per-basket work out over a multiprocessing pool.
    """
    with ExitStack() as stack:
        if self.use_multiprocessing:
            # Cap workers by CPU count, the configured maximum, and the number
            # of chunks actually available; `or 1` guards the zero-chunk case.
            chunks_to_process = int(len(self.baskets) / self.multiprocessing_chunk_size)
            num_cpus = min(mp.cpu_count(), self.max_processes, chunks_to_process) or 1

            logger.info(
                f"Got ya {num_cpus} parallel workers to fill the baskets with samples (chunksize = {self.multiprocessing_chunk_size})..."
            )
            p = stack.enter_context(mp.Pool(processes=num_cpus))

            manager = stack.enter_context(mp.Manager())
            if self.share_all_baskets_for_multiprocessing:
                # Manager-backed proxy list so every worker can read all raw dicts.
                all_dicts = manager.list([b.raw for b in self.baskets])
            else:
                all_dicts = None

            samples = p.imap(
                partial(self._multiproc_sample, all_dicts=all_dicts),
                self.baskets,
                chunksize=self.multiprocessing_chunk_size,
            )
        else:
            # Single-process fallback (e.g. on Windows): plain map, same callable.
            all_dicts = [b.raw for b in self.baskets]
            samples = map(
                partial(self._multiproc_sample, all_dicts=all_dicts), self.baskets
            )

        # imap preserves input order, so samples line up with self.baskets.
        for s, b in tqdm(zip(samples, self.baskets), total=len(self.baskets)):
            b.samples = s
|
def _init_samples_in_baskets(self):
    """
    Turn each basket's raw dict into Sample objects (stored on basket.samples),
    fanning the per-basket work out over a multiprocessing pool.
    """
    # Cap workers by CPU count, the configured maximum, and the number of
    # chunks actually available; `or 1` guards the zero-chunk case.
    chunks_to_process = int(len(self.baskets) / self.multiprocessing_chunk_size)
    num_cpus = min(mp.cpu_count(), self.max_processes, chunks_to_process) or 1

    logger.info(
        f"Got ya {num_cpus} parallel workers to fill the baskets with samples (chunksize = {self.multiprocessing_chunk_size})..."
    )

    with mp.Manager() as manager:
        if self.share_all_baskets_for_multiprocessing:
            # Manager-backed proxy list so every worker can read all raw dicts.
            all_dicts = manager.list([b.raw for b in self.baskets])
        else:
            all_dicts = None

        # Bug fix: the original opened a second, redundant mp.Pool nested inside
        # the first one, leaving the outer pool's workers idle. A single pool is
        # sufficient.
        with mp.Pool(processes=num_cpus) as p:
            samples = p.imap(
                partial(self._multiproc_sample, all_dicts=all_dicts),
                self.baskets,
                chunksize=self.multiprocessing_chunk_size,
            )
            # imap preserves input order, so samples line up with self.baskets.
            for s, b in tqdm(zip(samples, self.baskets), total=len(self.baskets)):
                b.samples = s
|
https://github.com/deepset-ai/FARM/issues/70
|
08/28/2019 07:47:35 - INFO - farm.utils - device: cuda n_gpu: 1, distributed training: False, 16-bits training: False
08/28/2019 07:47:35 - INFO - pytorch_transformers.tokenization_utils - loading file https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-vocab.txt from cache at C:\Users\JulianGerhard\.cache\torch\pytorch_transformers\da299cdd121a3d71e1626f2908dda0d02658f42e925a3d6abd8273ec08cf41a6.2a48e6c60dcdb582effb718237ce5894652e3b4abb94f0a4d9a857b70333308d
08/28/2019 07:47:35 - INFO - farm.data_handler.data_silo -
Loading data into the data silo ...
______
|o | !
__ |:`_|---'-.
|__|______.-/ _ \-----.|
(o)(o)------'\ _ / ( )
08/28/2019 07:47:35 - INFO - farm.data_handler.data_silo - Loading train set from: data/conll03-de\train.txt
08/28/2019 07:47:35 - INFO - farm.data_handler.utils - Couldn't find data/conll03-de\train.txt locally. Trying to download ...
08/28/2019 07:47:35 - INFO - farm.data_handler.utils - downloading and extracting file C:\Users\JulianGerhard\PycharmProjects\word_embeddings\ner_finetuning\data\conll03-de to dir
08/28/2019 07:47:39 - INFO - farm.data_handler.processor - Got ya 8 parallel workers to fill the baskets with samples (chunksize = 1000)...
0%| | 0/24000 [00:00<?, ?it/s]multiprocessing.pool.RemoteTraceback:
"""
Traceback (most recent call last):
File "C:\Users\JulianGerhard\AppData\Local\Programs\Python\Python36\lib\multiprocessing\pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "C:\Users\JulianGerhard\AppData\Local\Programs\Python\Python36\lib\multiprocessing\pool.py", line 44, in mapstar
return list(map(*args))
File "c:\users\juliangerhard\pycharmprojects\farm\farm\data_handler\processor.py", line 310, in _multiproc_sample
samples = cls._dict_to_samples(dict=basket.raw, all_dicts=all_dicts)
File "c:\users\juliangerhard\pycharmprojects\farm\farm\data_handler\processor.py", line 555, in _dict_to_samples
tokenized = tokenize_with_metadata(dict["text"], cls.tokenizer, cls.max_seq_len)
AttributeError: type object 'NERProcessor' has no attribute 'tokenizer'
"""
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "C:/Users/JulianGerhard/PycharmProjects/word_embeddings/ner_finetuning/farm_experimen t.py", line 6, in <module>
run_experiment(experiments[0])
File "c:\users\juliangerhard\pycharmprojects\farm\farm\experiment.py", line 87, in run_experiment
distributed=distributed,
File "c:\users\juliangerhard\pycharmprojects\farm\farm\data_handler\data_silo.py", line 39, in __init__
self._load_data()
File "c:\users\juliangerhard\pycharmprojects\farm\farm\data_handler\data_silo.py", line 47, in _load_data
self.data["train"], self.tensor_names = self.processor.dataset_from_file(train_file)
File "c:\users\juliangerhard\pycharmprojects\farm\farm\data_handler\processor.py", line 366, in dataset_from_file
self._init_samples_in_baskets()
File "c:\users\juliangerhard\pycharmprojects\farm\farm\data_handler\processor.py", line 304, in _init_samples_in_baskets
zip(samples, self.baskets), total=len(self.baskets)
File "C:\Users\JulianGerhard\AppData\Local\Programs\Python\Python36\lib\site-packages\tqdm\_tqdm.py", line 1034, in __iter__
for obj in iterable:
File "C:\Users\JulianGerhard\AppData\Local\Programs\Python\Python36\lib\multiprocessing\pool.py", line 320, in <genexpr>
return (item for chunk in result for item in chunk)
File "C:\Users\JulianGerhard\AppData\Local\Programs\Python\Python36\lib\multiprocessing\pool.py", line 735, in next
raise value
AttributeError: type object 'NERProcessor' has no attribute 'tokenizer'
0%| | 0/24000 [00:06<?, ?it/s]
|
AttributeError
|
def _featurize_samples(self):
    """
    Convert each basket's Samples into feature dicts (assigned to
    sample.features), optionally parallelized over a multiprocessing pool.
    """
    with ExitStack() as stack:
        if self.use_multiprocessing:
            # Cap workers by CPU count, the configured maximum, and the number
            # of chunks actually available; `or 1` guards the zero-chunk case.
            chunks_to_process = int(len(self.baskets) / self.multiprocessing_chunk_size)
            num_cpus = min(mp.cpu_count(), self.max_processes, chunks_to_process) or 1

            logger.info(
                f"Got ya {num_cpus} parallel workers to featurize samples in baskets (chunksize = {self.multiprocessing_chunk_size}) ..."
            )
            p = stack.enter_context(mp.Pool(processes=num_cpus))
            all_features_gen = p.imap(
                self._multiproc_featurize,
                self.baskets,
                chunksize=self.multiprocessing_chunk_size,
            )
        else:
            # Single-process fallback (e.g. on Windows): plain map, same callable.
            all_features_gen = map(self._multiproc_featurize, self.baskets)

        # Single consumption loop for both branches (was duplicated verbatim in
        # each branch). imap/map preserve order, so features line up with baskets.
        for basket_features, basket in tqdm(
            zip(all_features_gen, self.baskets), total=len(self.baskets)
        ):
            for f, s in zip(basket_features, basket.samples):
                s.features = f
|
def _featurize_samples(self):
    """
    Convert each basket's Samples into feature dicts (assigned to
    sample.features) using a multiprocessing pool.
    """
    # Cap workers by CPU count, the configured maximum, and the number of
    # chunks actually available; `or 1` guards the zero-chunk case.
    chunks_to_process = int(len(self.baskets) / self.multiprocessing_chunk_size)
    num_cpus = min(mp.cpu_count(), self.max_processes, chunks_to_process) or 1

    logger.info(
        f"Got ya {num_cpus} parallel workers to featurize samples in baskets (chunksize = {self.multiprocessing_chunk_size}) ..."
    )

    with mp.Pool(processes=num_cpus) as p:
        all_features_gen = p.imap(
            self._multiproc_featurize,
            self.baskets,
            chunksize=self.multiprocessing_chunk_size,
        )
        # imap preserves input order, so features line up with self.baskets.
        for basket_features, basket in tqdm(
            zip(all_features_gen, self.baskets), total=len(self.baskets)
        ):
            for f, s in zip(basket_features, basket.samples):
                s.features = f
|
https://github.com/deepset-ai/FARM/issues/70
|
08/28/2019 07:47:35 - INFO - farm.utils - device: cuda n_gpu: 1, distributed training: False, 16-bits training: False
08/28/2019 07:47:35 - INFO - pytorch_transformers.tokenization_utils - loading file https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-vocab.txt from cache at C:\Users\JulianGerhard\.cache\torch\pytorch_transformers\da299cdd121a3d71e1626f2908dda0d02658f42e925a3d6abd8273ec08cf41a6.2a48e6c60dcdb582effb718237ce5894652e3b4abb94f0a4d9a857b70333308d
08/28/2019 07:47:35 - INFO - farm.data_handler.data_silo -
Loading data into the data silo ...
______
|o | !
__ |:`_|---'-.
|__|______.-/ _ \-----.|
(o)(o)------'\ _ / ( )
08/28/2019 07:47:35 - INFO - farm.data_handler.data_silo - Loading train set from: data/conll03-de\train.txt
08/28/2019 07:47:35 - INFO - farm.data_handler.utils - Couldn't find data/conll03-de\train.txt locally. Trying to download ...
08/28/2019 07:47:35 - INFO - farm.data_handler.utils - downloading and extracting file C:\Users\JulianGerhard\PycharmProjects\word_embeddings\ner_finetuning\data\conll03-de to dir
08/28/2019 07:47:39 - INFO - farm.data_handler.processor - Got ya 8 parallel workers to fill the baskets with samples (chunksize = 1000)...
0%| | 0/24000 [00:00<?, ?it/s]multiprocessing.pool.RemoteTraceback:
"""
Traceback (most recent call last):
File "C:\Users\JulianGerhard\AppData\Local\Programs\Python\Python36\lib\multiprocessing\pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "C:\Users\JulianGerhard\AppData\Local\Programs\Python\Python36\lib\multiprocessing\pool.py", line 44, in mapstar
return list(map(*args))
File "c:\users\juliangerhard\pycharmprojects\farm\farm\data_handler\processor.py", line 310, in _multiproc_sample
samples = cls._dict_to_samples(dict=basket.raw, all_dicts=all_dicts)
File "c:\users\juliangerhard\pycharmprojects\farm\farm\data_handler\processor.py", line 555, in _dict_to_samples
tokenized = tokenize_with_metadata(dict["text"], cls.tokenizer, cls.max_seq_len)
AttributeError: type object 'NERProcessor' has no attribute 'tokenizer'
"""
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "C:/Users/JulianGerhard/PycharmProjects/word_embeddings/ner_finetuning/farm_experimen t.py", line 6, in <module>
run_experiment(experiments[0])
File "c:\users\juliangerhard\pycharmprojects\farm\farm\experiment.py", line 87, in run_experiment
distributed=distributed,
File "c:\users\juliangerhard\pycharmprojects\farm\farm\data_handler\data_silo.py", line 39, in __init__
self._load_data()
File "c:\users\juliangerhard\pycharmprojects\farm\farm\data_handler\data_silo.py", line 47, in _load_data
self.data["train"], self.tensor_names = self.processor.dataset_from_file(train_file)
File "c:\users\juliangerhard\pycharmprojects\farm\farm\data_handler\processor.py", line 366, in dataset_from_file
self._init_samples_in_baskets()
File "c:\users\juliangerhard\pycharmprojects\farm\farm\data_handler\processor.py", line 304, in _init_samples_in_baskets
zip(samples, self.baskets), total=len(self.baskets)
File "C:\Users\JulianGerhard\AppData\Local\Programs\Python\Python36\lib\site-packages\tqdm\_tqdm.py", line 1034, in __iter__
for obj in iterable:
File "C:\Users\JulianGerhard\AppData\Local\Programs\Python\Python36\lib\multiprocessing\pool.py", line 320, in <genexpr>
return (item for chunk in result for item in chunk)
File "C:\Users\JulianGerhard\AppData\Local\Programs\Python\Python36\lib\multiprocessing\pool.py", line 735, in next
raise value
AttributeError: type object 'NERProcessor' has no attribute 'tokenizer'
0%| | 0/24000 [00:06<?, ?it/s]
|
AttributeError
|
def _featurize_samples(self):
    """
    Featurize all samples; for regression, additionally fit a StandardScaler on
    the train labels and replace each label with its standardized value.
    """
    try:
        # Only fit the scaler on the train set (basket ids contain "train").
        if "train" in self.baskets[0].id:
            train_labels = []
            for basket in self.baskets:
                for sample in basket.samples:
                    train_labels.append(sample.clear_text["label"])
            scaler = StandardScaler()
            scaler.fit(np.reshape(train_labels, (-1, 1)))
            # Store (mean, scale) so predictions can be rescaled back later.
            self.label_list = [scaler.mean_.item(), scaler.scale_.item()]
            # Create label_maps because featurize is called after Processor instantiation
            self.label_maps = [{0: scaler.mean_.item(), 1: scaler.scale_.item()}]
    except Exception as e:
        # NOTE(review): broad catch — this also hides real errors,
        # not just the "no baskets yet" case.
        logger.warning(f"Baskets not found: {e}")

    with ExitStack() as stack:
        if self.use_multiprocessing:
            # Cap workers by CPU count, the configured maximum, and the number
            # of chunks actually available; `or 1` guards the zero-chunk case.
            chunks_to_process = int(len(self.baskets) / self.multiprocessing_chunk_size)
            num_cpus = min(mp.cpu_count(), self.max_processes, chunks_to_process) or 1

            logger.info(
                f"Got ya {num_cpus} parallel workers to featurize samples in baskets (chunksize = {self.multiprocessing_chunk_size}) ..."
            )
            p = stack.enter_context(mp.Pool(processes=num_cpus))
            all_features_gen = p.imap(
                self._multiproc_featurize,
                self.baskets,
                chunksize=self.multiprocessing_chunk_size,
            )
        else:
            # Single-process fallback (e.g. on Windows): plain map, same callable.
            all_features_gen = map(self._multiproc_featurize, self.baskets)

        for basket_features, basket in tqdm(
            zip(all_features_gen, self.baskets), total=len(self.baskets)
        ):
            for f, s in zip(basket_features, basket.samples):
                # Samples don't have labels during Inference mode
                if "label" in s.clear_text:
                    # Standardize: (label - mean) / scale, using the fitted stats.
                    label = s.clear_text["label"]
                    scaled_label = (label - self.label_list[0]) / self.label_list[1]
                    f[0]["label_ids"] = scaled_label
                s.features = f
|
def _featurize_samples(self):
chunks_to_process = int(len(self.baskets) / self.multiprocessing_chunk_size)
num_cpus = min(mp.cpu_count(), self.max_processes, chunks_to_process) or 1
logger.info(
f"Got ya {num_cpus} parallel workers to featurize samples in baskets (chunksize = {self.multiprocessing_chunk_size}) ..."
)
try:
if "train" in self.baskets[0].id:
train_labels = []
for basket in self.baskets:
for sample in basket.samples:
train_labels.append(sample.clear_text["label"])
scaler = StandardScaler()
scaler.fit(np.reshape(train_labels, (-1, 1)))
self.label_list = [scaler.mean_.item(), scaler.scale_.item()]
# Create label_maps because featurize is called after Processor instantiation
self.label_maps = [{0: scaler.mean_.item(), 1: scaler.scale_.item()}]
except Exception as e:
logger.warning(f"Baskets not found: {e}")
with mp.Pool(processes=num_cpus) as p:
all_features_gen = p.imap(
self._multiproc_featurize,
self.baskets,
chunksize=self.multiprocessing_chunk_size,
)
for basket_features, basket in tqdm(
zip(all_features_gen, self.baskets), total=len(self.baskets)
):
for f, s in zip(basket_features, basket.samples):
# Samples don't have labels during Inference mode
if "label" in s.clear_text:
label = s.clear_text["label"]
scaled_label = (label - self.label_list[0]) / self.label_list[1]
f[0]["label_ids"] = scaled_label
s.features = f
|
https://github.com/deepset-ai/FARM/issues/70
|
08/28/2019 07:47:35 - INFO - farm.utils - device: cuda n_gpu: 1, distributed training: False, 16-bits training: False
08/28/2019 07:47:35 - INFO - pytorch_transformers.tokenization_utils - loading file https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-vocab.txt from cache at C:\Users\JulianGerhard\.cache\torch\pytorch_transformers\da299cdd121a3d71e1626f2908dda0d02658f42e925a3d6abd8273ec08cf41a6.2a48e6c60dcdb582effb718237ce5894652e3b4abb94f0a4d9a857b70333308d
08/28/2019 07:47:35 - INFO - farm.data_handler.data_silo -
Loading data into the data silo ...
______
|o | !
__ |:`_|---'-.
|__|______.-/ _ \-----.|
(o)(o)------'\ _ / ( )
08/28/2019 07:47:35 - INFO - farm.data_handler.data_silo - Loading train set from: data/conll03-de\train.txt
08/28/2019 07:47:35 - INFO - farm.data_handler.utils - Couldn't find data/conll03-de\train.txt locally. Trying to download ...
08/28/2019 07:47:35 - INFO - farm.data_handler.utils - downloading and extracting file C:\Users\JulianGerhard\PycharmProjects\word_embeddings\ner_finetuning\data\conll03-de to dir
08/28/2019 07:47:39 - INFO - farm.data_handler.processor - Got ya 8 parallel workers to fill the baskets with samples (chunksize = 1000)...
0%| | 0/24000 [00:00<?, ?it/s]multiprocessing.pool.RemoteTraceback:
"""
Traceback (most recent call last):
File "C:\Users\JulianGerhard\AppData\Local\Programs\Python\Python36\lib\multiprocessing\pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "C:\Users\JulianGerhard\AppData\Local\Programs\Python\Python36\lib\multiprocessing\pool.py", line 44, in mapstar
return list(map(*args))
File "c:\users\juliangerhard\pycharmprojects\farm\farm\data_handler\processor.py", line 310, in _multiproc_sample
samples = cls._dict_to_samples(dict=basket.raw, all_dicts=all_dicts)
File "c:\users\juliangerhard\pycharmprojects\farm\farm\data_handler\processor.py", line 555, in _dict_to_samples
tokenized = tokenize_with_metadata(dict["text"], cls.tokenizer, cls.max_seq_len)
AttributeError: type object 'NERProcessor' has no attribute 'tokenizer'
"""
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "C:/Users/JulianGerhard/PycharmProjects/word_embeddings/ner_finetuning/farm_experimen t.py", line 6, in <module>
run_experiment(experiments[0])
File "c:\users\juliangerhard\pycharmprojects\farm\farm\experiment.py", line 87, in run_experiment
distributed=distributed,
File "c:\users\juliangerhard\pycharmprojects\farm\farm\data_handler\data_silo.py", line 39, in __init__
self._load_data()
File "c:\users\juliangerhard\pycharmprojects\farm\farm\data_handler\data_silo.py", line 47, in _load_data
self.data["train"], self.tensor_names = self.processor.dataset_from_file(train_file)
File "c:\users\juliangerhard\pycharmprojects\farm\farm\data_handler\processor.py", line 366, in dataset_from_file
self._init_samples_in_baskets()
File "c:\users\juliangerhard\pycharmprojects\farm\farm\data_handler\processor.py", line 304, in _init_samples_in_baskets
zip(samples, self.baskets), total=len(self.baskets)
File "C:\Users\JulianGerhard\AppData\Local\Programs\Python\Python36\lib\site-packages\tqdm\_tqdm.py", line 1034, in __iter__
for obj in iterable:
File "C:\Users\JulianGerhard\AppData\Local\Programs\Python\Python36\lib\multiprocessing\pool.py", line 320, in <genexpr>
return (item for chunk in result for item in chunk)
File "C:\Users\JulianGerhard\AppData\Local\Programs\Python\Python36\lib\multiprocessing\pool.py", line 735, in next
raise value
AttributeError: type object 'NERProcessor' has no attribute 'tokenizer'
0%| | 0/24000 [00:06<?, ?it/s]
|
AttributeError
|
def processSubscribe(self, session, subscribe):
"""
Implements :func:`crossbar.router.interfaces.IBroker.processSubscribe`
"""
if self._router.is_traced:
if not subscribe.correlation_id:
subscribe.correlation_id = self._router.new_correlation_id()
subscribe.correlation_is_anchor = True
subscribe.correlation_is_last = False
if not subscribe.correlation_uri:
subscribe.correlation_uri = subscribe.topic
self._router._factory._worker._maybe_trace_rx_msg(session, subscribe)
# check topic URI: for SUBSCRIBE, must be valid URI (either strict or loose), and all
# URI components must be non-empty for normal subscriptions, may be empty for
# wildcard subscriptions and must be non-empty for all but the last component for
# prefix subscriptions
#
if self._option_uri_strict:
if subscribe.match == "wildcard":
uri_is_valid = _URI_PAT_STRICT_EMPTY.match(subscribe.topic)
elif subscribe.match == "prefix":
uri_is_valid = _URI_PAT_STRICT_LAST_EMPTY.match(subscribe.topic)
else:
uri_is_valid = _URI_PAT_STRICT_NON_EMPTY.match(subscribe.topic)
else:
if subscribe.match == "wildcard":
uri_is_valid = _URI_PAT_LOOSE_EMPTY.match(subscribe.topic)
elif subscribe.match == "prefix":
uri_is_valid = _URI_PAT_LOOSE_LAST_EMPTY.match(subscribe.topic)
else:
uri_is_valid = _URI_PAT_LOOSE_NON_EMPTY.match(subscribe.topic)
if not uri_is_valid:
reply = message.Error(
message.Subscribe.MESSAGE_TYPE,
subscribe.request,
ApplicationError.INVALID_URI,
["subscribe for invalid topic URI '{0}'".format(subscribe.topic)],
)
reply.correlation_id = subscribe.correlation_id
reply.correlation_uri = subscribe.topic
reply.correlation_is_anchor = False
reply.correlation_is_last = True
self._router.send(session, reply)
return
# authorize SUBSCRIBE action
#
d = self._router.authorize(
session, subscribe.topic, "subscribe", options=subscribe.marshal_options()
)
def on_authorize_success(authorization):
if not authorization["allow"]:
# error reply since session is not authorized to subscribe
#
replies = [
message.Error(
message.Subscribe.MESSAGE_TYPE,
subscribe.request,
ApplicationError.NOT_AUTHORIZED,
[
"session is not authorized to subscribe to topic '{0}'".format(
subscribe.topic
)
],
)
]
replies[0].correlation_id = subscribe.correlation_id
replies[0].correlation_uri = subscribe.topic
replies[0].correlation_is_anchor = False
replies[0].correlation_is_last = True
else:
# if the session disconencted while the authorization was
# being checked, stop
if session not in self._session_to_subscriptions:
# if the session *really* disconnected, it won't have
# a _session_id any longer, so we double-check
if session._session_id is not None:
self.log.error(
"Session '{session_id}' still appears valid, but isn't in subscription map",
session_id=session._session_id,
)
self.log.info(
"Session vanished while subscribing to '{topic}'",
topic=subscribe.topic,
)
return
# ok, session authorized to subscribe. now get the subscription
#
subscription, was_already_subscribed, is_first_subscriber = (
self._subscription_map.add_observer(
session, subscribe.topic, subscribe.match, extra=SubscriptionExtra()
)
)
if not was_already_subscribed:
self._session_to_subscriptions[session].add(subscription)
# publish WAMP meta events, if we have a service session, but
# not for the meta API itself!
#
if (
self._router._realm
and self._router._realm.session
and not subscription.uri.startswith("wamp.")
and (is_first_subscriber or not was_already_subscribed)
):
has_follow_up_messages = True
exclude_authid = None
if subscribe.forward_for:
exclude_authid = [ff["authid"] for ff in subscribe.forward_for]
def _publish():
service_session = self._router._realm.session
if exclude_authid or self._router.is_traced:
options = types.PublishOptions(
correlation_id=subscribe.correlation_id,
correlation_is_anchor=False,
correlation_is_last=False,
exclude_authid=exclude_authid,
)
else:
options = None
if is_first_subscriber:
subscription_details = {
"id": subscription.id,
"created": subscription.created,
"uri": subscription.uri,
"match": subscription.match,
}
service_session.publish(
"wamp.subscription.on_create",
session._session_id,
subscription_details,
options=options,
)
if not was_already_subscribed:
if options:
options.correlation_is_last = True
service_session.publish(
"wamp.subscription.on_subscribe",
session._session_id,
subscription.id,
options=options,
)
# we postpone actual sending of meta events until we return to this client session
self._reactor.callLater(0, _publish)
else:
has_follow_up_messages = False
# check for retained events
#
def _get_retained_event():
if subscription.extra.retained_events:
retained_events = list(subscription.extra.retained_events)
retained_events.reverse()
for retained_event in retained_events:
authorized = False
if (
not retained_event.publish.exclude
and not retained_event.publish.eligible
):
authorized = True
elif (
session._session_id in retained_event.publish.eligible
and session._session_id
not in retained_event.publish.exclude
):
authorized = True
if authorized:
publication = util.id()
if retained_event.publish.payload:
msg = message.Event(
subscription.id,
publication,
payload=retained_event.publish.payload,
enc_algo=retained_event.publish.enc_algo,
enc_key=retained_event.publish.enc_key,
enc_serializer=retained_event.publish.enc_serializer,
publisher=retained_event.publisher,
publisher_authid=retained_event.publisher_authid,
publisher_authrole=retained_event.publisher_authrole,
retained=True,
)
else:
msg = message.Event(
subscription.id,
publication,
args=retained_event.publish.args,
kwargs=retained_event.publish.kwargs,
publisher=retained_event.publisher,
publisher_authid=retained_event.publisher_authid,
publisher_authrole=retained_event.publisher_authrole,
retained=True,
)
msg.correlation_id = subscribe.correlation_id
msg.correlation_uri = subscribe.topic
msg.correlation_is_anchor = False
msg.correlation_is_last = False
return [msg]
return []
# acknowledge subscribe with subscription ID
#
replies = [message.Subscribed(subscribe.request, subscription.id)]
replies[0].correlation_id = subscribe.correlation_id
replies[0].correlation_uri = subscribe.topic
replies[0].correlation_is_anchor = False
replies[0].correlation_is_last = False
if subscribe.get_retained:
replies.extend(_get_retained_event())
replies[-1].correlation_is_last = not has_follow_up_messages
# send out reply to subscribe requestor
#
[self._router.send(session, reply) for reply in replies]
def on_authorize_error(err):
"""
the call to authorize the action _itself_ failed (note this is
different from the call to authorize succeed, but the
authorization being denied)
"""
self.log.failure(
"Authorization of 'subscribe' for '{uri}' failed",
uri=subscribe.topic,
failure=err,
)
reply = message.Error(
message.Subscribe.MESSAGE_TYPE,
subscribe.request,
ApplicationError.AUTHORIZATION_FAILED,
[
"failed to authorize session for subscribing to topic URI '{0}': {1}".format(
subscribe.topic, err.value
)
],
)
reply.correlation_id = subscribe.correlation_id
reply.correlation_uri = subscribe.topic
reply.correlation_is_anchor = False
reply.correlation_is_last = True
self._router.send(session, reply)
txaio.add_callbacks(d, on_authorize_success, on_authorize_error)
|
def processSubscribe(self, session, subscribe):
"""
Implements :func:`crossbar.router.interfaces.IBroker.processSubscribe`
"""
if self._router.is_traced:
if not subscribe.correlation_id:
subscribe.correlation_id = self._router.new_correlation_id()
subscribe.correlation_is_anchor = True
subscribe.correlation_is_last = False
if not subscribe.correlation_uri:
subscribe.correlation_uri = subscribe.topic
self._router._factory._worker._maybe_trace_rx_msg(session, subscribe)
# check topic URI: for SUBSCRIBE, must be valid URI (either strict or loose), and all
# URI components must be non-empty for normal subscriptions, may be empty for
# wildcard subscriptions and must be non-empty for all but the last component for
# prefix subscriptions
#
if self._option_uri_strict:
if subscribe.match == "wildcard":
uri_is_valid = _URI_PAT_STRICT_EMPTY.match(subscribe.topic)
elif subscribe.match == "prefix":
uri_is_valid = _URI_PAT_STRICT_LAST_EMPTY.match(subscribe.topic)
else:
uri_is_valid = _URI_PAT_STRICT_NON_EMPTY.match(subscribe.topic)
else:
if subscribe.match == "wildcard":
uri_is_valid = _URI_PAT_LOOSE_EMPTY.match(subscribe.topic)
elif subscribe.match == "prefix":
uri_is_valid = _URI_PAT_LOOSE_LAST_EMPTY.match(subscribe.topic)
else:
uri_is_valid = _URI_PAT_LOOSE_NON_EMPTY.match(subscribe.topic)
if not uri_is_valid:
reply = message.Error(
message.Subscribe.MESSAGE_TYPE,
subscribe.request,
ApplicationError.INVALID_URI,
["subscribe for invalid topic URI '{0}'".format(subscribe.topic)],
)
reply.correlation_id = subscribe.correlation_id
reply.correlation_uri = subscribe.topic
reply.correlation_is_anchor = False
reply.correlation_is_last = True
self._router.send(session, reply)
return
# authorize SUBSCRIBE action
#
d = self._router.authorize(
session, subscribe.topic, "subscribe", options=subscribe.marshal_options()
)
def on_authorize_success(authorization):
if not authorization["allow"]:
# error reply since session is not authorized to subscribe
#
replies = [
message.Error(
message.Subscribe.MESSAGE_TYPE,
subscribe.request,
ApplicationError.NOT_AUTHORIZED,
[
"session is not authorized to subscribe to topic '{0}'".format(
subscribe.topic
)
],
)
]
replies[0].correlation_id = subscribe.correlation_id
replies[0].correlation_uri = subscribe.topic
replies[0].correlation_is_anchor = False
replies[0].correlation_is_last = True
else:
# ok, session authorized to subscribe. now get the subscription
#
subscription, was_already_subscribed, is_first_subscriber = (
self._subscription_map.add_observer(
session, subscribe.topic, subscribe.match, extra=SubscriptionExtra()
)
)
if not was_already_subscribed:
self._session_to_subscriptions[session].add(subscription)
# publish WAMP meta events, if we have a service session, but
# not for the meta API itself!
#
if (
self._router._realm
and self._router._realm.session
and not subscription.uri.startswith("wamp.")
and (is_first_subscriber or not was_already_subscribed)
):
has_follow_up_messages = True
exclude_authid = None
if subscribe.forward_for:
exclude_authid = [ff["authid"] for ff in subscribe.forward_for]
def _publish():
service_session = self._router._realm.session
if exclude_authid or self._router.is_traced:
options = types.PublishOptions(
correlation_id=subscribe.correlation_id,
correlation_is_anchor=False,
correlation_is_last=False,
exclude_authid=exclude_authid,
)
else:
options = None
if is_first_subscriber:
subscription_details = {
"id": subscription.id,
"created": subscription.created,
"uri": subscription.uri,
"match": subscription.match,
}
service_session.publish(
"wamp.subscription.on_create",
session._session_id,
subscription_details,
options=options,
)
if not was_already_subscribed:
if options:
options.correlation_is_last = True
service_session.publish(
"wamp.subscription.on_subscribe",
session._session_id,
subscription.id,
options=options,
)
# we postpone actual sending of meta events until we return to this client session
self._reactor.callLater(0, _publish)
else:
has_follow_up_messages = False
# check for retained events
#
def _get_retained_event():
if subscription.extra.retained_events:
retained_events = list(subscription.extra.retained_events)
retained_events.reverse()
for retained_event in retained_events:
authorized = False
if (
not retained_event.publish.exclude
and not retained_event.publish.eligible
):
authorized = True
elif (
session._session_id in retained_event.publish.eligible
and session._session_id
not in retained_event.publish.exclude
):
authorized = True
if authorized:
publication = util.id()
if retained_event.publish.payload:
msg = message.Event(
subscription.id,
publication,
payload=retained_event.publish.payload,
enc_algo=retained_event.publish.enc_algo,
enc_key=retained_event.publish.enc_key,
enc_serializer=retained_event.publish.enc_serializer,
publisher=retained_event.publisher,
publisher_authid=retained_event.publisher_authid,
publisher_authrole=retained_event.publisher_authrole,
retained=True,
)
else:
msg = message.Event(
subscription.id,
publication,
args=retained_event.publish.args,
kwargs=retained_event.publish.kwargs,
publisher=retained_event.publisher,
publisher_authid=retained_event.publisher_authid,
publisher_authrole=retained_event.publisher_authrole,
retained=True,
)
msg.correlation_id = subscribe.correlation_id
msg.correlation_uri = subscribe.topic
msg.correlation_is_anchor = False
msg.correlation_is_last = False
return [msg]
return []
# acknowledge subscribe with subscription ID
#
replies = [message.Subscribed(subscribe.request, subscription.id)]
replies[0].correlation_id = subscribe.correlation_id
replies[0].correlation_uri = subscribe.topic
replies[0].correlation_is_anchor = False
replies[0].correlation_is_last = False
if subscribe.get_retained:
replies.extend(_get_retained_event())
replies[-1].correlation_is_last = not has_follow_up_messages
# send out reply to subscribe requestor
#
[self._router.send(session, reply) for reply in replies]
def on_authorize_error(err):
"""
the call to authorize the action _itself_ failed (note this is
different from the call to authorize succeed, but the
authorization being denied)
"""
self.log.failure(
"Authorization of 'subscribe' for '{uri}' failed",
uri=subscribe.topic,
failure=err,
)
reply = message.Error(
message.Subscribe.MESSAGE_TYPE,
subscribe.request,
ApplicationError.AUTHORIZATION_FAILED,
[
"failed to authorize session for subscribing to topic URI '{0}': {1}".format(
subscribe.topic, err.value
)
],
)
reply.correlation_id = subscribe.correlation_id
reply.correlation_uri = subscribe.topic
reply.correlation_is_anchor = False
reply.correlation_is_last = True
self._router.send(session, reply)
txaio.add_callbacks(d, on_authorize_success, on_authorize_error)
|
https://github.com/crossbario/crossbar/issues/1576
|
Unhandled error in Deferred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/autobahn/wamp/protocol.py", line 888, in onMessage
txaio.resolve(on_reply, msg.args[0])
File "/usr/local/lib/python3.6/dist-packages/txaio/tx.py", line 468, in resolve
future.callback(result)
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 460, in callback
self._startRunCallbacks(result)
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks
self._runCallbacks()
--- <exception caught here> ---
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 654, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "/usr/local/lib/python3.6/dist-packages/crossbar/router/dealer.py", line 441, in on_authorize_success
self._session_to_registrations[session].add(registration)
builtins.KeyError: <crossbar.router.session.RouterSession object at 0x7f653abfaef0>
|
builtins.KeyError
|
def on_authorize_success(authorization):
if not authorization["allow"]:
# error reply since session is not authorized to subscribe
#
replies = [
message.Error(
message.Subscribe.MESSAGE_TYPE,
subscribe.request,
ApplicationError.NOT_AUTHORIZED,
[
"session is not authorized to subscribe to topic '{0}'".format(
subscribe.topic
)
],
)
]
replies[0].correlation_id = subscribe.correlation_id
replies[0].correlation_uri = subscribe.topic
replies[0].correlation_is_anchor = False
replies[0].correlation_is_last = True
else:
# if the session disconencted while the authorization was
# being checked, stop
if session not in self._session_to_subscriptions:
# if the session *really* disconnected, it won't have
# a _session_id any longer, so we double-check
if session._session_id is not None:
self.log.error(
"Session '{session_id}' still appears valid, but isn't in subscription map",
session_id=session._session_id,
)
self.log.info(
"Session vanished while subscribing to '{topic}'",
topic=subscribe.topic,
)
return
# ok, session authorized to subscribe. now get the subscription
#
subscription, was_already_subscribed, is_first_subscriber = (
self._subscription_map.add_observer(
session, subscribe.topic, subscribe.match, extra=SubscriptionExtra()
)
)
if not was_already_subscribed:
self._session_to_subscriptions[session].add(subscription)
# publish WAMP meta events, if we have a service session, but
# not for the meta API itself!
#
if (
self._router._realm
and self._router._realm.session
and not subscription.uri.startswith("wamp.")
and (is_first_subscriber or not was_already_subscribed)
):
has_follow_up_messages = True
exclude_authid = None
if subscribe.forward_for:
exclude_authid = [ff["authid"] for ff in subscribe.forward_for]
def _publish():
service_session = self._router._realm.session
if exclude_authid or self._router.is_traced:
options = types.PublishOptions(
correlation_id=subscribe.correlation_id,
correlation_is_anchor=False,
correlation_is_last=False,
exclude_authid=exclude_authid,
)
else:
options = None
if is_first_subscriber:
subscription_details = {
"id": subscription.id,
"created": subscription.created,
"uri": subscription.uri,
"match": subscription.match,
}
service_session.publish(
"wamp.subscription.on_create",
session._session_id,
subscription_details,
options=options,
)
if not was_already_subscribed:
if options:
options.correlation_is_last = True
service_session.publish(
"wamp.subscription.on_subscribe",
session._session_id,
subscription.id,
options=options,
)
# we postpone actual sending of meta events until we return to this client session
self._reactor.callLater(0, _publish)
else:
has_follow_up_messages = False
# check for retained events
#
def _get_retained_event():
if subscription.extra.retained_events:
retained_events = list(subscription.extra.retained_events)
retained_events.reverse()
for retained_event in retained_events:
authorized = False
if (
not retained_event.publish.exclude
and not retained_event.publish.eligible
):
authorized = True
elif (
session._session_id in retained_event.publish.eligible
and session._session_id not in retained_event.publish.exclude
):
authorized = True
if authorized:
publication = util.id()
if retained_event.publish.payload:
msg = message.Event(
subscription.id,
publication,
payload=retained_event.publish.payload,
enc_algo=retained_event.publish.enc_algo,
enc_key=retained_event.publish.enc_key,
enc_serializer=retained_event.publish.enc_serializer,
publisher=retained_event.publisher,
publisher_authid=retained_event.publisher_authid,
publisher_authrole=retained_event.publisher_authrole,
retained=True,
)
else:
msg = message.Event(
subscription.id,
publication,
args=retained_event.publish.args,
kwargs=retained_event.publish.kwargs,
publisher=retained_event.publisher,
publisher_authid=retained_event.publisher_authid,
publisher_authrole=retained_event.publisher_authrole,
retained=True,
)
msg.correlation_id = subscribe.correlation_id
msg.correlation_uri = subscribe.topic
msg.correlation_is_anchor = False
msg.correlation_is_last = False
return [msg]
return []
# acknowledge subscribe with subscription ID
#
replies = [message.Subscribed(subscribe.request, subscription.id)]
replies[0].correlation_id = subscribe.correlation_id
replies[0].correlation_uri = subscribe.topic
replies[0].correlation_is_anchor = False
replies[0].correlation_is_last = False
if subscribe.get_retained:
replies.extend(_get_retained_event())
replies[-1].correlation_is_last = not has_follow_up_messages
# send out reply to subscribe requestor
#
[self._router.send(session, reply) for reply in replies]
|
def on_authorize_success(authorization):
if not authorization["allow"]:
# error reply since session is not authorized to subscribe
#
replies = [
message.Error(
message.Subscribe.MESSAGE_TYPE,
subscribe.request,
ApplicationError.NOT_AUTHORIZED,
[
"session is not authorized to subscribe to topic '{0}'".format(
subscribe.topic
)
],
)
]
replies[0].correlation_id = subscribe.correlation_id
replies[0].correlation_uri = subscribe.topic
replies[0].correlation_is_anchor = False
replies[0].correlation_is_last = True
else:
# ok, session authorized to subscribe. now get the subscription
#
subscription, was_already_subscribed, is_first_subscriber = (
self._subscription_map.add_observer(
session, subscribe.topic, subscribe.match, extra=SubscriptionExtra()
)
)
if not was_already_subscribed:
self._session_to_subscriptions[session].add(subscription)
# publish WAMP meta events, if we have a service session, but
# not for the meta API itself!
#
if (
self._router._realm
and self._router._realm.session
and not subscription.uri.startswith("wamp.")
and (is_first_subscriber or not was_already_subscribed)
):
has_follow_up_messages = True
exclude_authid = None
if subscribe.forward_for:
exclude_authid = [ff["authid"] for ff in subscribe.forward_for]
def _publish():
service_session = self._router._realm.session
if exclude_authid or self._router.is_traced:
options = types.PublishOptions(
correlation_id=subscribe.correlation_id,
correlation_is_anchor=False,
correlation_is_last=False,
exclude_authid=exclude_authid,
)
else:
options = None
if is_first_subscriber:
subscription_details = {
"id": subscription.id,
"created": subscription.created,
"uri": subscription.uri,
"match": subscription.match,
}
service_session.publish(
"wamp.subscription.on_create",
session._session_id,
subscription_details,
options=options,
)
if not was_already_subscribed:
if options:
options.correlation_is_last = True
service_session.publish(
"wamp.subscription.on_subscribe",
session._session_id,
subscription.id,
options=options,
)
# we postpone actual sending of meta events until we return to this client session
self._reactor.callLater(0, _publish)
else:
has_follow_up_messages = False
# check for retained events
#
def _get_retained_event():
if subscription.extra.retained_events:
retained_events = list(subscription.extra.retained_events)
retained_events.reverse()
for retained_event in retained_events:
authorized = False
if (
not retained_event.publish.exclude
and not retained_event.publish.eligible
):
authorized = True
elif (
session._session_id in retained_event.publish.eligible
and session._session_id not in retained_event.publish.exclude
):
authorized = True
if authorized:
publication = util.id()
if retained_event.publish.payload:
msg = message.Event(
subscription.id,
publication,
payload=retained_event.publish.payload,
enc_algo=retained_event.publish.enc_algo,
enc_key=retained_event.publish.enc_key,
enc_serializer=retained_event.publish.enc_serializer,
publisher=retained_event.publisher,
publisher_authid=retained_event.publisher_authid,
publisher_authrole=retained_event.publisher_authrole,
retained=True,
)
else:
msg = message.Event(
subscription.id,
publication,
args=retained_event.publish.args,
kwargs=retained_event.publish.kwargs,
publisher=retained_event.publisher,
publisher_authid=retained_event.publisher_authid,
publisher_authrole=retained_event.publisher_authrole,
retained=True,
)
msg.correlation_id = subscribe.correlation_id
msg.correlation_uri = subscribe.topic
msg.correlation_is_anchor = False
msg.correlation_is_last = False
return [msg]
return []
# acknowledge subscribe with subscription ID
#
replies = [message.Subscribed(subscribe.request, subscription.id)]
replies[0].correlation_id = subscribe.correlation_id
replies[0].correlation_uri = subscribe.topic
replies[0].correlation_is_anchor = False
replies[0].correlation_is_last = False
if subscribe.get_retained:
replies.extend(_get_retained_event())
replies[-1].correlation_is_last = not has_follow_up_messages
# send out reply to subscribe requestor
#
[self._router.send(session, reply) for reply in replies]
|
https://github.com/crossbario/crossbar/issues/1576
|
Unhandled error in Deferred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/autobahn/wamp/protocol.py", line 888, in onMessage
txaio.resolve(on_reply, msg.args[0])
File "/usr/local/lib/python3.6/dist-packages/txaio/tx.py", line 468, in resolve
future.callback(result)
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 460, in callback
self._startRunCallbacks(result)
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks
self._runCallbacks()
--- <exception caught here> ---
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 654, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "/usr/local/lib/python3.6/dist-packages/crossbar/router/dealer.py", line 441, in on_authorize_success
self._session_to_registrations[session].add(registration)
builtins.KeyError: <crossbar.router.session.RouterSession object at 0x7f653abfaef0>
|
builtins.KeyError
|
def processRegister(self, session, register):
"""
Implements :func:`crossbar.router.interfaces.IDealer.processRegister`
"""
# check topic URI: for SUBSCRIBE, must be valid URI (either strict or loose), and all
# URI components must be non-empty other than for wildcard subscriptions
#
if self._router.is_traced:
if not register.correlation_id:
register.correlation_id = self._router.new_correlation_id()
register.correlation_is_anchor = True
register.correlation_is_last = False
if not register.correlation_uri:
register.correlation_uri = register.procedure
self._router._factory._worker._maybe_trace_rx_msg(session, register)
if self._option_uri_strict:
if register.match == "wildcard":
uri_is_valid = _URI_PAT_STRICT_EMPTY.match(register.procedure)
elif register.match == "prefix":
uri_is_valid = _URI_PAT_STRICT_LAST_EMPTY.match(register.procedure)
elif register.match == "exact":
uri_is_valid = _URI_PAT_STRICT_NON_EMPTY.match(register.procedure)
else:
# should not arrive here
raise Exception("logic error")
else:
if register.match == "wildcard":
uri_is_valid = _URI_PAT_LOOSE_EMPTY.match(register.procedure)
elif register.match == "prefix":
uri_is_valid = _URI_PAT_LOOSE_LAST_EMPTY.match(register.procedure)
elif register.match == "exact":
uri_is_valid = _URI_PAT_LOOSE_NON_EMPTY.match(register.procedure)
else:
# should not arrive here
raise Exception("logic error")
if not uri_is_valid:
reply = message.Error(
message.Register.MESSAGE_TYPE,
register.request,
ApplicationError.INVALID_URI,
[
"register for invalid procedure URI '{0}' (URI strict checking {1})".format(
register.procedure, self._option_uri_strict
)
],
)
reply.correlation_id = register.correlation_id
reply.correlation_uri = register.procedure
reply.correlation_is_anchor = False
reply.correlation_is_last = True
self._router.send(session, reply)
return
# disallow registration of procedures starting with "wamp." other than for
# trusted sessions (that are sessions built into Crossbar.io routing core)
#
if session._authrole is not None and session._authrole != "trusted":
is_restricted = register.procedure.startswith("wamp.")
if is_restricted:
reply = message.Error(
message.Register.MESSAGE_TYPE,
register.request,
ApplicationError.INVALID_URI,
[
"register for restricted procedure URI '{0}')".format(
register.procedure
)
],
)
reply.correlation_id = register.correlation_id
reply.correlation_uri = register.procedure
reply.correlation_is_anchor = False
reply.correlation_is_last = True
self._router.send(session, reply)
return
# authorize REGISTER action
#
d = self._router.authorize(
session, register.procedure, "register", options=register.marshal_options()
)
def on_authorize_success(authorization):
# check the authorization before ANYTHING else, otherwise
# we may leak information about already-registered URIs
# etc.
if not authorization["allow"]:
# error reply since session is not authorized to register
#
reply = message.Error(
message.Register.MESSAGE_TYPE,
register.request,
ApplicationError.NOT_AUTHORIZED,
[
"session is not authorized to register procedure '{0}'".format(
register.procedure
)
],
)
# get existing registration for procedure / matching strategy - if any
#
registration = self._registration_map.get_observation(
register.procedure, register.match
)
# if the session disconencted while the authorization was
# being checked, stop
if session not in self._session_to_registrations:
# if the session *really* disconnected, it won't have
# a _session_id any longer, so we double-check
if session._session_id is not None:
self.log.error(
"Session '{session_id}' still appears valid, but isn't in registration map",
session_id=session._session_id,
)
self.log.info(
"Session vanished while registering '{procedure}'",
procedure=register.procedure,
)
assert registration is None
return
# if force_reregister was enabled, we only do any actual
# kicking of existing registrations *after* authorization
if registration and not register.force_reregister:
# there is an existing registration, and that has an
# invocation strategy that only allows a single callee
# on a the given registration
#
if registration.extra.invoke == message.Register.INVOKE_SINGLE:
reply = message.Error(
message.Register.MESSAGE_TYPE,
register.request,
ApplicationError.PROCEDURE_ALREADY_EXISTS,
[
"register for already registered procedure '{0}'".format(
register.procedure
)
],
)
reply.correlation_id = register.correlation_id
reply.correlation_uri = register.procedure
reply.correlation_is_anchor = False
reply.correlation_is_last = True
self._router.send(session, reply)
return
# there is an existing registration, and that has an
# invokation strategy different from the one requested
# by the new callee
#
if registration.extra.invoke != register.invoke:
reply = message.Error(
message.Register.MESSAGE_TYPE,
register.request,
ApplicationError.PROCEDURE_EXISTS_INVOCATION_POLICY_CONFLICT,
[
"register for already registered procedure '{0}' "
"with conflicting invocation policy (has {1} and "
"{2} was requested)".format(
register.procedure,
registration.extra.invoke,
register.invoke,
)
],
)
reply.correlation_id = register.correlation_id
reply.correlation_uri = register.procedure
reply.correlation_is_anchor = False
reply.correlation_is_last = True
self._router.send(session, reply)
return
# this check is a little redundant, because theoretically
# we already returned (above) if this was False, but for safety...
if authorization["allow"]:
registration = self._registration_map.get_observation(
register.procedure, register.match
)
if register.force_reregister and registration:
for obs in registration.observers:
self._registration_map.drop_observer(obs, registration)
kicked = message.Unregistered(
0,
registration=registration.id,
reason="wamp.error.unregistered",
)
kicked.correlation_id = register.correlation_id
kicked.correlation_uri = register.procedure
kicked.correlation_is_anchor = False
kicked.correlation_is_last = False
self._router.send(obs, kicked)
self._registration_map.delete_observation(registration)
# ok, session authorized to register. now get the registration
#
registration_extra = RegistrationExtra(register.invoke)
registration_callee_extra = RegistrationCalleeExtra(register.concurrency)
registration, was_already_registered, is_first_callee = (
self._registration_map.add_observer(
session,
register.procedure,
register.match,
registration_extra,
registration_callee_extra,
)
)
if not was_already_registered:
self._session_to_registrations[session].add(registration)
# acknowledge register with registration ID
#
reply = message.Registered(register.request, registration.id)
reply.correlation_id = register.correlation_id
reply.correlation_uri = register.procedure
reply.correlation_is_anchor = False
# publish WAMP meta events, if we have a service session, but
# not for the meta API itself!
#
if (
self._router._realm
and self._router._realm.session
and not registration.uri.startswith("wamp.")
and (is_first_callee or not was_already_registered)
):
reply.correlation_is_last = False
# when this message was forwarded from other nodes, exclude all such nodes
# from receiving the meta event we'll publish below by authid (of the r2r link
# from the forwarding node connected to this router node)
exclude_authid = None
if register.forward_for:
exclude_authid = [ff["authid"] for ff in register.forward_for]
self.log.info(
"WAMP meta event will be published excluding these authids (from forward_for): {exclude_authid}",
exclude_authid=exclude_authid,
)
def _publish():
service_session = self._router._realm.session
if exclude_authid or self._router.is_traced:
options = types.PublishOptions(
correlation_id=register.correlation_id,
correlation_is_anchor=False,
correlation_is_last=False,
exclude_authid=exclude_authid,
)
else:
options = None
if is_first_callee:
registration_details = {
"id": registration.id,
"created": registration.created,
"uri": registration.uri,
"match": registration.match,
"invoke": registration.extra.invoke,
}
service_session.publish(
"wamp.registration.on_create",
session._session_id,
registration_details,
options=options,
)
if not was_already_registered:
if options:
options.correlation_is_last = True
service_session.publish(
"wamp.registration.on_register",
session._session_id,
registration.id,
options=options,
)
# we postpone actual sending of meta events until we return to this client session
self._reactor.callLater(0, _publish)
else:
reply.correlation_is_last = True
# send out reply to register requestor
#
self._router.send(session, reply)
def on_authorize_error(err):
"""
the call to authorize the action _itself_ failed (note this is
different from the call to authorize succeed, but the
authorization being denied)
"""
self.log.failure(
"Authorization of 'register' for '{uri}' failed",
uri=register.procedure,
failure=err,
)
reply = message.Error(
message.Register.MESSAGE_TYPE,
register.request,
ApplicationError.AUTHORIZATION_FAILED,
[
"failed to authorize session for registering procedure '{0}': {1}".format(
register.procedure, err.value
)
],
)
reply.correlation_id = register.correlation_id
reply.correlation_uri = register.procedure
reply.correlation_is_anchor = False
reply.correlation_is_last = True
self._router.send(session, reply)
txaio.add_callbacks(d, on_authorize_success, on_authorize_error)
|
def processRegister(self, session, register):
    """
    Implements :func:`crossbar.router.interfaces.IDealer.processRegister`

    Validates the procedure URI of the REGISTER message, rejects restricted
    ("wamp.") procedure URIs for non-trusted sessions, asynchronously
    authorizes the REGISTER action via the router and - when authorized -
    adds the callee to the registration map, replies with REGISTERED and
    publishes the WAMP meta events ``wamp.registration.on_create`` /
    ``wamp.registration.on_register``.

    :param session: Router session from which the REGISTER was received.
    :param register: The REGISTER message to process.
    """
    # check topic URI: for SUBSCRIBE, must be valid URI (either strict or loose), and all
    # URI components must be non-empty other than for wildcard subscriptions
    #
    if self._router.is_traced:
        # tag the incoming message with correlation info for message tracing
        if not register.correlation_id:
            register.correlation_id = self._router.new_correlation_id()
            register.correlation_is_anchor = True
            register.correlation_is_last = False
        if not register.correlation_uri:
            register.correlation_uri = register.procedure
        self._router._factory._worker._maybe_trace_rx_msg(session, register)
    # pick the URI validation pattern matching the configured strictness
    # and the requested matching policy of the registration
    if self._option_uri_strict:
        if register.match == "wildcard":
            uri_is_valid = _URI_PAT_STRICT_EMPTY.match(register.procedure)
        elif register.match == "prefix":
            uri_is_valid = _URI_PAT_STRICT_LAST_EMPTY.match(register.procedure)
        elif register.match == "exact":
            uri_is_valid = _URI_PAT_STRICT_NON_EMPTY.match(register.procedure)
        else:
            # should not arrive here
            raise Exception("logic error")
    else:
        if register.match == "wildcard":
            uri_is_valid = _URI_PAT_LOOSE_EMPTY.match(register.procedure)
        elif register.match == "prefix":
            uri_is_valid = _URI_PAT_LOOSE_LAST_EMPTY.match(register.procedure)
        elif register.match == "exact":
            uri_is_valid = _URI_PAT_LOOSE_NON_EMPTY.match(register.procedure)
        else:
            # should not arrive here
            raise Exception("logic error")
    if not uri_is_valid:
        reply = message.Error(
            message.Register.MESSAGE_TYPE,
            register.request,
            ApplicationError.INVALID_URI,
            [
                "register for invalid procedure URI '{0}' (URI strict checking {1})".format(
                    register.procedure, self._option_uri_strict
                )
            ],
        )
        reply.correlation_id = register.correlation_id
        reply.correlation_uri = register.procedure
        reply.correlation_is_anchor = False
        reply.correlation_is_last = True
        self._router.send(session, reply)
        return
    # disallow registration of procedures starting with "wamp." other than for
    # trusted sessions (that are sessions built into Crossbar.io routing core)
    #
    if session._authrole is not None and session._authrole != "trusted":
        is_restricted = register.procedure.startswith("wamp.")
        if is_restricted:
            reply = message.Error(
                message.Register.MESSAGE_TYPE,
                register.request,
                ApplicationError.INVALID_URI,
                [
                    "register for restricted procedure URI '{0}')".format(
                        register.procedure
                    )
                ],
            )
            reply.correlation_id = register.correlation_id
            reply.correlation_uri = register.procedure
            reply.correlation_is_anchor = False
            reply.correlation_is_last = True
            self._router.send(session, reply)
            return
    # authorize REGISTER action
    #
    d = self._router.authorize(
        session, register.procedure, "register", options=register.marshal_options()
    )
    def on_authorize_success(authorization):
        # check the authorization before ANYTHING else, otherwise
        # we may leak information about already-registered URIs
        # etc.
        if not authorization["allow"]:
            # error reply since session is not authorized to register
            #
            reply = message.Error(
                message.Register.MESSAGE_TYPE,
                register.request,
                ApplicationError.NOT_AUTHORIZED,
                [
                    "session is not authorized to register procedure '{0}'".format(
                        register.procedure
                    )
                ],
            )
        # get existing registration for procedure / matching strategy - if any
        #
        registration = self._registration_map.get_observation(
            register.procedure, register.match
        )
        # the session may have disconnected while the (asynchronous)
        # authorization was being checked - in that case it has already
        # been removed from self._session_to_registrations, and continuing
        # would raise a KeyError further below (crossbar issue #1576)
        if session not in self._session_to_registrations:
            # if the session *really* disconnected, it won't have
            # a _session_id any longer, so we double-check
            if session._session_id is not None:
                self.log.error(
                    "Session '{session_id}' still appears valid, but isn't in registration map",
                    session_id=session._session_id,
                )
            self.log.info(
                "Session vanished while registering '{procedure}'",
                procedure=register.procedure,
            )
            # a vanished session cannot have left its registration behind
            assert registration is None
            return
        # if force_reregister was enabled, we only do any actual
        # kicking of existing registrations *after* authorization
        if registration and not register.force_reregister:
            # there is an existing registration, and that has an
            # invocation strategy that only allows a single callee
            # on the given registration
            #
            if registration.extra.invoke == message.Register.INVOKE_SINGLE:
                reply = message.Error(
                    message.Register.MESSAGE_TYPE,
                    register.request,
                    ApplicationError.PROCEDURE_ALREADY_EXISTS,
                    [
                        "register for already registered procedure '{0}'".format(
                            register.procedure
                        )
                    ],
                )
                reply.correlation_id = register.correlation_id
                reply.correlation_uri = register.procedure
                reply.correlation_is_anchor = False
                reply.correlation_is_last = True
                self._router.send(session, reply)
                return
            # there is an existing registration, and that has an
            # invocation strategy different from the one requested
            # by the new callee
            #
            if registration.extra.invoke != register.invoke:
                reply = message.Error(
                    message.Register.MESSAGE_TYPE,
                    register.request,
                    ApplicationError.PROCEDURE_EXISTS_INVOCATION_POLICY_CONFLICT,
                    [
                        "register for already registered procedure '{0}' "
                        "with conflicting invocation policy (has {1} and "
                        "{2} was requested)".format(
                            register.procedure,
                            registration.extra.invoke,
                            register.invoke,
                        )
                    ],
                )
                reply.correlation_id = register.correlation_id
                reply.correlation_uri = register.procedure
                reply.correlation_is_anchor = False
                reply.correlation_is_last = True
                self._router.send(session, reply)
                return
        # this check is a little redundant, because theoretically
        # we already returned (above) if this was False, but for safety...
        if authorization["allow"]:
            registration = self._registration_map.get_observation(
                register.procedure, register.match
            )
            if register.force_reregister and registration:
                # kick every current callee off the registration before
                # re-registering the new one
                for obs in registration.observers:
                    self._registration_map.drop_observer(obs, registration)
                    kicked = message.Unregistered(
                        0,
                        registration=registration.id,
                        reason="wamp.error.unregistered",
                    )
                    kicked.correlation_id = register.correlation_id
                    kicked.correlation_uri = register.procedure
                    kicked.correlation_is_anchor = False
                    kicked.correlation_is_last = False
                    self._router.send(obs, kicked)
                self._registration_map.delete_observation(registration)
            # ok, session authorized to register. now get the registration
            #
            registration_extra = RegistrationExtra(register.invoke)
            registration_callee_extra = RegistrationCalleeExtra(register.concurrency)
            registration, was_already_registered, is_first_callee = (
                self._registration_map.add_observer(
                    session,
                    register.procedure,
                    register.match,
                    registration_extra,
                    registration_callee_extra,
                )
            )
            if not was_already_registered:
                self._session_to_registrations[session].add(registration)
            # acknowledge register with registration ID
            #
            reply = message.Registered(register.request, registration.id)
            reply.correlation_id = register.correlation_id
            reply.correlation_uri = register.procedure
            reply.correlation_is_anchor = False
            # publish WAMP meta events, if we have a service session, but
            # not for the meta API itself!
            #
            if (
                self._router._realm
                and self._router._realm.session
                and not registration.uri.startswith("wamp.")
                and (is_first_callee or not was_already_registered)
            ):
                reply.correlation_is_last = False
                # when this message was forwarded from other nodes, exclude all such nodes
                # from receiving the meta event we'll publish below by authid (of the r2r link
                # from the forwarding node connected to this router node)
                exclude_authid = None
                if register.forward_for:
                    exclude_authid = [ff["authid"] for ff in register.forward_for]
                    self.log.info(
                        "WAMP meta event will be published excluding these authids (from forward_for): {exclude_authid}",
                        exclude_authid=exclude_authid,
                    )
                def _publish():
                    # actually emit the meta event(s) on the service session
                    service_session = self._router._realm.session
                    if exclude_authid or self._router.is_traced:
                        options = types.PublishOptions(
                            correlation_id=register.correlation_id,
                            correlation_is_anchor=False,
                            correlation_is_last=False,
                            exclude_authid=exclude_authid,
                        )
                    else:
                        options = None
                    if is_first_callee:
                        registration_details = {
                            "id": registration.id,
                            "created": registration.created,
                            "uri": registration.uri,
                            "match": registration.match,
                            "invoke": registration.extra.invoke,
                        }
                        service_session.publish(
                            "wamp.registration.on_create",
                            session._session_id,
                            registration_details,
                            options=options,
                        )
                    if not was_already_registered:
                        if options:
                            options.correlation_is_last = True
                        service_session.publish(
                            "wamp.registration.on_register",
                            session._session_id,
                            registration.id,
                            options=options,
                        )
                # we postpone actual sending of meta events until we return to this client session
                self._reactor.callLater(0, _publish)
            else:
                reply.correlation_is_last = True
        # send out reply to register requestor
        #
        self._router.send(session, reply)
    def on_authorize_error(err):
        """
        the call to authorize the action _itself_ failed (note this is
        different from the call to authorize succeed, but the
        authorization being denied)
        """
        self.log.failure(
            "Authorization of 'register' for '{uri}' failed",
            uri=register.procedure,
            failure=err,
        )
        reply = message.Error(
            message.Register.MESSAGE_TYPE,
            register.request,
            ApplicationError.AUTHORIZATION_FAILED,
            [
                "failed to authorize session for registering procedure '{0}': {1}".format(
                    register.procedure, err.value
                )
            ],
        )
        reply.correlation_id = register.correlation_id
        reply.correlation_uri = register.procedure
        reply.correlation_is_anchor = False
        reply.correlation_is_last = True
        self._router.send(session, reply)
    txaio.add_callbacks(d, on_authorize_success, on_authorize_error)
|
https://github.com/crossbario/crossbar/issues/1576
|
Unhandled error in Deferred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/autobahn/wamp/protocol.py", line 888, in onMessage
txaio.resolve(on_reply, msg.args[0])
File "/usr/local/lib/python3.6/dist-packages/txaio/tx.py", line 468, in resolve
future.callback(result)
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 460, in callback
self._startRunCallbacks(result)
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks
self._runCallbacks()
--- <exception caught here> ---
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 654, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "/usr/local/lib/python3.6/dist-packages/crossbar/router/dealer.py", line 441, in on_authorize_success
self._session_to_registrations[session].add(registration)
builtins.KeyError: <crossbar.router.session.RouterSession object at 0x7f653abfaef0>
|
builtins.KeyError
|
def on_authorize_success(authorization):
    """
    Continuation run once the router has answered the (asynchronous)
    authorization request for a REGISTER.

    NOTE(review): this is a closure extracted from ``processRegister`` -
    it relies on ``self``, ``session`` and ``register`` being available
    from the enclosing scope.

    :param authorization: Authorization result; ``authorization["allow"]``
        decides whether the registration may proceed.
    """
    # check the authorization before ANYTHING else, otherwise
    # we may leak information about already-registered URIs
    # etc.
    if not authorization["allow"]:
        # error reply since session is not authorized to register
        #
        reply = message.Error(
            message.Register.MESSAGE_TYPE,
            register.request,
            ApplicationError.NOT_AUTHORIZED,
            [
                "session is not authorized to register procedure '{0}'".format(
                    register.procedure
                )
            ],
        )
    # get existing registration for procedure / matching strategy - if any
    #
    registration = self._registration_map.get_observation(
        register.procedure, register.match
    )
    # if the session disconnected while the authorization was
    # being checked, stop: otherwise the lookup in
    # self._session_to_registrations further below would raise KeyError
    if session not in self._session_to_registrations:
        # if the session *really* disconnected, it won't have
        # a _session_id any longer, so we double-check
        if session._session_id is not None:
            self.log.error(
                "Session '{session_id}' still appears valid, but isn't in registration map",
                session_id=session._session_id,
            )
        self.log.info(
            "Session vanished while registering '{procedure}'",
            procedure=register.procedure,
        )
        # a vanished session cannot have left its registration behind
        assert registration is None
        return
    # if force_reregister was enabled, we only do any actual
    # kicking of existing registrations *after* authorization
    if registration and not register.force_reregister:
        # there is an existing registration, and that has an
        # invocation strategy that only allows a single callee
        # on the given registration
        #
        if registration.extra.invoke == message.Register.INVOKE_SINGLE:
            reply = message.Error(
                message.Register.MESSAGE_TYPE,
                register.request,
                ApplicationError.PROCEDURE_ALREADY_EXISTS,
                [
                    "register for already registered procedure '{0}'".format(
                        register.procedure
                    )
                ],
            )
            reply.correlation_id = register.correlation_id
            reply.correlation_uri = register.procedure
            reply.correlation_is_anchor = False
            reply.correlation_is_last = True
            self._router.send(session, reply)
            return
        # there is an existing registration, and that has an
        # invocation strategy different from the one requested
        # by the new callee
        #
        if registration.extra.invoke != register.invoke:
            reply = message.Error(
                message.Register.MESSAGE_TYPE,
                register.request,
                ApplicationError.PROCEDURE_EXISTS_INVOCATION_POLICY_CONFLICT,
                [
                    "register for already registered procedure '{0}' "
                    "with conflicting invocation policy (has {1} and "
                    "{2} was requested)".format(
                        register.procedure, registration.extra.invoke, register.invoke
                    )
                ],
            )
            reply.correlation_id = register.correlation_id
            reply.correlation_uri = register.procedure
            reply.correlation_is_anchor = False
            reply.correlation_is_last = True
            self._router.send(session, reply)
            return
    # this check is a little redundant, because theoretically
    # we already returned (above) if this was False, but for safety...
    if authorization["allow"]:
        registration = self._registration_map.get_observation(
            register.procedure, register.match
        )
        if register.force_reregister and registration:
            # kick every currently registered callee before re-registering
            for obs in registration.observers:
                self._registration_map.drop_observer(obs, registration)
                kicked = message.Unregistered(
                    0,
                    registration=registration.id,
                    reason="wamp.error.unregistered",
                )
                kicked.correlation_id = register.correlation_id
                kicked.correlation_uri = register.procedure
                kicked.correlation_is_anchor = False
                kicked.correlation_is_last = False
                self._router.send(obs, kicked)
            self._registration_map.delete_observation(registration)
        # ok, session authorized to register. now get the registration
        #
        registration_extra = RegistrationExtra(register.invoke)
        registration_callee_extra = RegistrationCalleeExtra(register.concurrency)
        registration, was_already_registered, is_first_callee = (
            self._registration_map.add_observer(
                session,
                register.procedure,
                register.match,
                registration_extra,
                registration_callee_extra,
            )
        )
        if not was_already_registered:
            self._session_to_registrations[session].add(registration)
        # acknowledge register with registration ID
        #
        reply = message.Registered(register.request, registration.id)
        reply.correlation_id = register.correlation_id
        reply.correlation_uri = register.procedure
        reply.correlation_is_anchor = False
        # publish WAMP meta events, if we have a service session, but
        # not for the meta API itself!
        #
        if (
            self._router._realm
            and self._router._realm.session
            and not registration.uri.startswith("wamp.")
            and (is_first_callee or not was_already_registered)
        ):
            reply.correlation_is_last = False
            # when this message was forwarded from other nodes, exclude all such nodes
            # from receiving the meta event we'll publish below by authid (of the r2r link
            # from the forwarding node connected to this router node)
            exclude_authid = None
            if register.forward_for:
                exclude_authid = [ff["authid"] for ff in register.forward_for]
                self.log.info(
                    "WAMP meta event will be published excluding these authids (from forward_for): {exclude_authid}",
                    exclude_authid=exclude_authid,
                )
            def _publish():
                # actually emit the meta event(s) on the service session
                service_session = self._router._realm.session
                if exclude_authid or self._router.is_traced:
                    options = types.PublishOptions(
                        correlation_id=register.correlation_id,
                        correlation_is_anchor=False,
                        correlation_is_last=False,
                        exclude_authid=exclude_authid,
                    )
                else:
                    options = None
                if is_first_callee:
                    registration_details = {
                        "id": registration.id,
                        "created": registration.created,
                        "uri": registration.uri,
                        "match": registration.match,
                        "invoke": registration.extra.invoke,
                    }
                    service_session.publish(
                        "wamp.registration.on_create",
                        session._session_id,
                        registration_details,
                        options=options,
                    )
                if not was_already_registered:
                    if options:
                        options.correlation_is_last = True
                    service_session.publish(
                        "wamp.registration.on_register",
                        session._session_id,
                        registration.id,
                        options=options,
                    )
            # we postpone actual sending of meta events until we return to this client session
            self._reactor.callLater(0, _publish)
        else:
            reply.correlation_is_last = True
    # send out reply to register requestor
    #
    self._router.send(session, reply)
|
def on_authorize_success(authorization):
    """
    Continuation run once the router has answered the (asynchronous)
    authorization request for a REGISTER.

    NOTE(review): this is a closure extracted from ``processRegister`` -
    it relies on ``self``, ``session`` and ``register`` being available
    from the enclosing scope.

    :param authorization: Authorization result; ``authorization["allow"]``
        decides whether the registration may proceed.
    """
    # check the authorization before ANYTHING else, otherwise
    # we may leak information about already-registered URIs
    # etc.
    if not authorization["allow"]:
        # error reply since session is not authorized to register
        #
        reply = message.Error(
            message.Register.MESSAGE_TYPE,
            register.request,
            ApplicationError.NOT_AUTHORIZED,
            [
                "session is not authorized to register procedure '{0}'".format(
                    register.procedure
                )
            ],
        )
    # get existing registration for procedure / matching strategy - if any
    #
    registration = self._registration_map.get_observation(
        register.procedure, register.match
    )
    # the session may have disconnected while the (asynchronous)
    # authorization was being checked - in that case it has already
    # been removed from self._session_to_registrations, and continuing
    # would raise a KeyError further below (crossbar issue #1576)
    if session not in self._session_to_registrations:
        # if the session *really* disconnected, it won't have
        # a _session_id any longer, so we double-check
        if session._session_id is not None:
            self.log.error(
                "Session '{session_id}' still appears valid, but isn't in registration map",
                session_id=session._session_id,
            )
        self.log.info(
            "Session vanished while registering '{procedure}'",
            procedure=register.procedure,
        )
        # a vanished session cannot have left its registration behind
        assert registration is None
        return
    # if force_reregister was enabled, we only do any actual
    # kicking of existing registrations *after* authorization
    if registration and not register.force_reregister:
        # there is an existing registration, and that has an
        # invocation strategy that only allows a single callee
        # on the given registration
        #
        if registration.extra.invoke == message.Register.INVOKE_SINGLE:
            reply = message.Error(
                message.Register.MESSAGE_TYPE,
                register.request,
                ApplicationError.PROCEDURE_ALREADY_EXISTS,
                [
                    "register for already registered procedure '{0}'".format(
                        register.procedure
                    )
                ],
            )
            reply.correlation_id = register.correlation_id
            reply.correlation_uri = register.procedure
            reply.correlation_is_anchor = False
            reply.correlation_is_last = True
            self._router.send(session, reply)
            return
        # there is an existing registration, and that has an
        # invocation strategy different from the one requested
        # by the new callee
        #
        if registration.extra.invoke != register.invoke:
            reply = message.Error(
                message.Register.MESSAGE_TYPE,
                register.request,
                ApplicationError.PROCEDURE_EXISTS_INVOCATION_POLICY_CONFLICT,
                [
                    "register for already registered procedure '{0}' "
                    "with conflicting invocation policy (has {1} and "
                    "{2} was requested)".format(
                        register.procedure, registration.extra.invoke, register.invoke
                    )
                ],
            )
            reply.correlation_id = register.correlation_id
            reply.correlation_uri = register.procedure
            reply.correlation_is_anchor = False
            reply.correlation_is_last = True
            self._router.send(session, reply)
            return
    # this check is a little redundant, because theoretically
    # we already returned (above) if this was False, but for safety...
    if authorization["allow"]:
        registration = self._registration_map.get_observation(
            register.procedure, register.match
        )
        if register.force_reregister and registration:
            # kick every currently registered callee before re-registering
            for obs in registration.observers:
                self._registration_map.drop_observer(obs, registration)
                kicked = message.Unregistered(
                    0,
                    registration=registration.id,
                    reason="wamp.error.unregistered",
                )
                kicked.correlation_id = register.correlation_id
                kicked.correlation_uri = register.procedure
                kicked.correlation_is_anchor = False
                kicked.correlation_is_last = False
                self._router.send(obs, kicked)
            self._registration_map.delete_observation(registration)
        # ok, session authorized to register. now get the registration
        #
        registration_extra = RegistrationExtra(register.invoke)
        registration_callee_extra = RegistrationCalleeExtra(register.concurrency)
        registration, was_already_registered, is_first_callee = (
            self._registration_map.add_observer(
                session,
                register.procedure,
                register.match,
                registration_extra,
                registration_callee_extra,
            )
        )
        if not was_already_registered:
            self._session_to_registrations[session].add(registration)
        # acknowledge register with registration ID
        #
        reply = message.Registered(register.request, registration.id)
        reply.correlation_id = register.correlation_id
        reply.correlation_uri = register.procedure
        reply.correlation_is_anchor = False
        # publish WAMP meta events, if we have a service session, but
        # not for the meta API itself!
        #
        if (
            self._router._realm
            and self._router._realm.session
            and not registration.uri.startswith("wamp.")
            and (is_first_callee or not was_already_registered)
        ):
            reply.correlation_is_last = False
            # when this message was forwarded from other nodes, exclude all such nodes
            # from receiving the meta event we'll publish below by authid (of the r2r link
            # from the forwarding node connected to this router node)
            exclude_authid = None
            if register.forward_for:
                exclude_authid = [ff["authid"] for ff in register.forward_for]
                self.log.info(
                    "WAMP meta event will be published excluding these authids (from forward_for): {exclude_authid}",
                    exclude_authid=exclude_authid,
                )
            def _publish():
                # actually emit the meta event(s) on the service session
                service_session = self._router._realm.session
                if exclude_authid or self._router.is_traced:
                    options = types.PublishOptions(
                        correlation_id=register.correlation_id,
                        correlation_is_anchor=False,
                        correlation_is_last=False,
                        exclude_authid=exclude_authid,
                    )
                else:
                    options = None
                if is_first_callee:
                    registration_details = {
                        "id": registration.id,
                        "created": registration.created,
                        "uri": registration.uri,
                        "match": registration.match,
                        "invoke": registration.extra.invoke,
                    }
                    service_session.publish(
                        "wamp.registration.on_create",
                        session._session_id,
                        registration_details,
                        options=options,
                    )
                if not was_already_registered:
                    if options:
                        options.correlation_is_last = True
                    service_session.publish(
                        "wamp.registration.on_register",
                        session._session_id,
                        registration.id,
                        options=options,
                    )
            # we postpone actual sending of meta events until we return to this client session
            self._reactor.callLater(0, _publish)
        else:
            reply.correlation_is_last = True
    # send out reply to register requestor
    #
    self._router.send(session, reply)
|
https://github.com/crossbario/crossbar/issues/1576
|
Unhandled error in Deferred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/autobahn/wamp/protocol.py", line 888, in onMessage
txaio.resolve(on_reply, msg.args[0])
File "/usr/local/lib/python3.6/dist-packages/txaio/tx.py", line 468, in resolve
future.callback(result)
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 460, in callback
self._startRunCallbacks(result)
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks
self._runCallbacks()
--- <exception caught here> ---
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 654, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "/usr/local/lib/python3.6/dist-packages/crossbar/router/dealer.py", line 441, in on_authorize_success
self._session_to_registrations[session].add(registration)
builtins.KeyError: <crossbar.router.session.RouterSession object at 0x7f653abfaef0>
|
builtins.KeyError
|
def processCall(self, session, call):
    """
    Implements :func:`crossbar.router.interfaces.IDealer.processCall`

    Validates the procedure URI of the CALL, asynchronously authorizes the
    action and - when authorized - forwards the invocation to the best
    matching registered callee, or replies with an ERROR otherwise.

    :param session: Router session from which the CALL was received.
    :param call: The CALL message to process.
    """
    if self._router.is_traced:
        # tag the incoming message with correlation info for message tracing
        if not call.correlation_id:
            call.correlation_id = self._router.new_correlation_id()
            call.correlation_is_anchor = True
            call.correlation_is_last = False
        if not call.correlation_uri:
            call.correlation_uri = call.procedure
        self._router._factory._worker._maybe_trace_rx_msg(session, call)

    def _reply_error(error_uri, error_text):
        # build a terminal ERROR reply for this CALL (with correlation
        # info for tracing) and send it back to the caller
        err = message.Error(
            message.Call.MESSAGE_TYPE, call.request, error_uri, [error_text]
        )
        err.correlation_id = call.correlation_id
        err.correlation_uri = call.procedure
        err.correlation_is_anchor = False
        err.correlation_is_last = True
        self._router.send(session, err)

    # check procedure URI: for CALL, must be valid URI (either strict or loose), and
    # all URI components must be non-empty
    uri_pattern = (
        _URI_PAT_STRICT_NON_EMPTY
        if self._option_uri_strict
        else _URI_PAT_LOOSE_NON_EMPTY
    )
    if not uri_pattern.match(call.procedure):
        _reply_error(
            ApplicationError.INVALID_URI,
            "call with invalid procedure URI '{0}' (URI strict checking {1})".format(
                call.procedure, self._option_uri_strict
            ),
        )
        return

    def on_authorize_success(authorization):
        # the call to authorize the action _itself_ succeeded. now go on depending on
        # whether the action was actually authorized or not ..
        if not authorization["allow"]:
            _reply_error(
                ApplicationError.NOT_AUTHORIZED,
                "session is not authorized to call procedure '{0}'".format(
                    call.procedure
                ),
            )
            return
        # get registrations active on the procedure called
        registration = self._registration_map.best_matching_observation(
            call.procedure
        )
        # if the session disconnected while the authorization was being
        # checked, 'registration' will be None and we (correctly) fire an error
        if not registration:
            _reply_error(
                ApplicationError.NO_SUCH_PROCEDURE,
                "no callee registered for procedure <{0}>".format(call.procedure),
            )
            return
        # validate payload (skip in "payload_transparency" mode)
        if call.payload is None:
            try:
                self._router.validate("call", call.procedure, call.args, call.kwargs)
            except Exception as e:
                _reply_error(
                    ApplicationError.INVALID_ARGUMENT,
                    "call of procedure '{0}' with invalid application payload: {1}".format(
                        call.procedure, e
                    ),
                )
                return
        # now actually perform the invocation of the callee ..
        self._call(session, call, registration, authorization)

    def on_authorize_error(err):
        """
        the call to authorize the action _itself_ failed (note this is
        different from the call to authorize succeed, but the
        authorization being denied)
        """
        self.log.failure(
            "Authorization of 'call' for '{uri}' failed",
            uri=call.procedure,
            failure=err,
        )
        _reply_error(
            ApplicationError.AUTHORIZATION_FAILED,
            "failed to authorize session for calling procedure '{0}': {1}".format(
                call.procedure, err.value
            ),
        )

    # authorize CALL action
    d = self._router.authorize(
        session, call.procedure, "call", options=call.marshal_options()
    )
    txaio.add_callbacks(d, on_authorize_success, on_authorize_error)
|
def processCall(self, session, call):
"""
Implements :func:`crossbar.router.interfaces.IDealer.processCall`
"""
if self._router.is_traced:
if not call.correlation_id:
call.correlation_id = self._router.new_correlation_id()
call.correlation_is_anchor = True
call.correlation_is_last = False
if not call.correlation_uri:
call.correlation_uri = call.procedure
self._router._factory._worker._maybe_trace_rx_msg(session, call)
# check procedure URI: for CALL, must be valid URI (either strict or loose), and
# all URI components must be non-empty
if self._option_uri_strict:
uri_is_valid = _URI_PAT_STRICT_NON_EMPTY.match(call.procedure)
else:
uri_is_valid = _URI_PAT_LOOSE_NON_EMPTY.match(call.procedure)
if not uri_is_valid:
reply = message.Error(
message.Call.MESSAGE_TYPE,
call.request,
ApplicationError.INVALID_URI,
[
"call with invalid procedure URI '{0}' (URI strict checking {1})".format(
call.procedure, self._option_uri_strict
)
],
)
reply.correlation_id = call.correlation_id
reply.correlation_uri = call.procedure
reply.correlation_is_anchor = False
reply.correlation_is_last = True
self._router.send(session, reply)
return
# authorize CALL action
#
d = self._router.authorize(
session, call.procedure, "call", options=call.marshal_options()
)
def on_authorize_success(authorization):
# the call to authorize the action _itself_ succeeded. now go on depending on whether
# the action was actually authorized or not ..
#
if not authorization["allow"]:
reply = message.Error(
message.Call.MESSAGE_TYPE,
call.request,
ApplicationError.NOT_AUTHORIZED,
[
"session is not authorized to call procedure '{0}'".format(
call.procedure
)
],
)
reply.correlation_id = call.correlation_id
reply.correlation_uri = call.procedure
reply.correlation_is_anchor = False
reply.correlation_is_last = True
self._router.send(session, reply)
else:
# get registrations active on the procedure called
#
registration = self._registration_map.best_matching_observation(
call.procedure
)
if registration:
# validate payload (skip in "payload_transparency" mode)
#
if call.payload is None:
try:
self._router.validate(
"call", call.procedure, call.args, call.kwargs
)
except Exception as e:
reply = message.Error(
message.Call.MESSAGE_TYPE,
call.request,
ApplicationError.INVALID_ARGUMENT,
[
"call of procedure '{0}' with invalid application payload: {1}".format(
call.procedure, e
)
],
)
reply.correlation_id = call.correlation_id
reply.correlation_uri = call.procedure
reply.correlation_is_anchor = False
reply.correlation_is_last = True
self._router.send(session, reply)
return
# now actually perform the invocation of the callee ..
#
self._call(session, call, registration, authorization)
else:
reply = message.Error(
message.Call.MESSAGE_TYPE,
call.request,
ApplicationError.NO_SUCH_PROCEDURE,
["no callee registered for procedure <{0}>".format(call.procedure)],
)
reply.correlation_id = call.correlation_id
reply.correlation_uri = call.procedure
reply.correlation_is_anchor = False
reply.correlation_is_last = True
self._router.send(session, reply)
def on_authorize_error(err):
"""
the call to authorize the action _itself_ failed (note this is
different from the call to authorize succeed, but the
authorization being denied)
"""
self.log.failure(
"Authorization of 'call' for '{uri}' failed",
uri=call.procedure,
failure=err,
)
reply = message.Error(
message.Call.MESSAGE_TYPE,
call.request,
ApplicationError.AUTHORIZATION_FAILED,
[
"failed to authorize session for calling procedure '{0}': {1}".format(
call.procedure, err.value
)
],
)
reply.correlation_id = call.correlation_id
reply.correlation_uri = call.procedure
reply.correlation_is_anchor = False
reply.correlation_is_last = True
self._router.send(session, reply)
txaio.add_callbacks(d, on_authorize_success, on_authorize_error)
|
https://github.com/crossbario/crossbar/issues/1576
|
Unhandled error in Deferred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/autobahn/wamp/protocol.py", line 888, in onMessage
txaio.resolve(on_reply, msg.args[0])
File "/usr/local/lib/python3.6/dist-packages/txaio/tx.py", line 468, in resolve
future.callback(result)
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 460, in callback
self._startRunCallbacks(result)
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks
self._runCallbacks()
--- <exception caught here> ---
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 654, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "/usr/local/lib/python3.6/dist-packages/crossbar/router/dealer.py", line 441, in on_authorize_success
self._session_to_registrations[session].add(registration)
builtins.KeyError: <crossbar.router.session.RouterSession object at 0x7f653abfaef0>
|
builtins.KeyError
|
def on_authorize_success(authorization):
# the call to authorize the action _itself_ succeeded. now go on depending on whether
# the action was actually authorized or not ..
#
if not authorization["allow"]:
reply = message.Error(
message.Call.MESSAGE_TYPE,
call.request,
ApplicationError.NOT_AUTHORIZED,
[
"session is not authorized to call procedure '{0}'".format(
call.procedure
)
],
)
reply.correlation_id = call.correlation_id
reply.correlation_uri = call.procedure
reply.correlation_is_anchor = False
reply.correlation_is_last = True
self._router.send(session, reply)
else:
# get registrations active on the procedure called
#
registration = self._registration_map.best_matching_observation(call.procedure)
# if the session disconencted while the authorization
# was being checked, 'registration' will be None and
# we'll (correctly) fire an error.
if registration:
# validate payload (skip in "payload_transparency" mode)
#
if call.payload is None:
try:
self._router.validate(
"call", call.procedure, call.args, call.kwargs
)
except Exception as e:
reply = message.Error(
message.Call.MESSAGE_TYPE,
call.request,
ApplicationError.INVALID_ARGUMENT,
[
"call of procedure '{0}' with invalid application payload: {1}".format(
call.procedure, e
)
],
)
reply.correlation_id = call.correlation_id
reply.correlation_uri = call.procedure
reply.correlation_is_anchor = False
reply.correlation_is_last = True
self._router.send(session, reply)
return
# now actually perform the invocation of the callee ..
#
self._call(session, call, registration, authorization)
else:
reply = message.Error(
message.Call.MESSAGE_TYPE,
call.request,
ApplicationError.NO_SUCH_PROCEDURE,
["no callee registered for procedure <{0}>".format(call.procedure)],
)
reply.correlation_id = call.correlation_id
reply.correlation_uri = call.procedure
reply.correlation_is_anchor = False
reply.correlation_is_last = True
self._router.send(session, reply)
|
def on_authorize_success(authorization):
# the call to authorize the action _itself_ succeeded. now go on depending on whether
# the action was actually authorized or not ..
#
if not authorization["allow"]:
reply = message.Error(
message.Call.MESSAGE_TYPE,
call.request,
ApplicationError.NOT_AUTHORIZED,
[
"session is not authorized to call procedure '{0}'".format(
call.procedure
)
],
)
reply.correlation_id = call.correlation_id
reply.correlation_uri = call.procedure
reply.correlation_is_anchor = False
reply.correlation_is_last = True
self._router.send(session, reply)
else:
# get registrations active on the procedure called
#
registration = self._registration_map.best_matching_observation(call.procedure)
if registration:
# validate payload (skip in "payload_transparency" mode)
#
if call.payload is None:
try:
self._router.validate(
"call", call.procedure, call.args, call.kwargs
)
except Exception as e:
reply = message.Error(
message.Call.MESSAGE_TYPE,
call.request,
ApplicationError.INVALID_ARGUMENT,
[
"call of procedure '{0}' with invalid application payload: {1}".format(
call.procedure, e
)
],
)
reply.correlation_id = call.correlation_id
reply.correlation_uri = call.procedure
reply.correlation_is_anchor = False
reply.correlation_is_last = True
self._router.send(session, reply)
return
# now actually perform the invocation of the callee ..
#
self._call(session, call, registration, authorization)
else:
reply = message.Error(
message.Call.MESSAGE_TYPE,
call.request,
ApplicationError.NO_SUCH_PROCEDURE,
["no callee registered for procedure <{0}>".format(call.procedure)],
)
reply.correlation_id = call.correlation_id
reply.correlation_uri = call.procedure
reply.correlation_is_anchor = False
reply.correlation_is_last = True
self._router.send(session, reply)
|
https://github.com/crossbario/crossbar/issues/1576
|
Unhandled error in Deferred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/autobahn/wamp/protocol.py", line 888, in onMessage
txaio.resolve(on_reply, msg.args[0])
File "/usr/local/lib/python3.6/dist-packages/txaio/tx.py", line 468, in resolve
future.callback(result)
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 460, in callback
self._startRunCallbacks(result)
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks
self._runCallbacks()
--- <exception caught here> ---
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 654, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "/usr/local/lib/python3.6/dist-packages/crossbar/router/dealer.py", line 441, in on_authorize_success
self._session_to_registrations[session].add(registration)
builtins.KeyError: <crossbar.router.session.RouterSession object at 0x7f653abfaef0>
|
builtins.KeyError
|
def onMessage(self, msg):
"""
Implements :func:`autobahn.wamp.interfaces.ITransportHandler.onMessage`
"""
if self._session_id is None:
if not self._pending_session_id:
self._pending_session_id = util.id()
def welcome(
realm,
authid=None,
authrole=None,
authmethod=None,
authprovider=None,
authextra=None,
custom=None,
):
self._realm = realm
self._session_id = self._pending_session_id
self._pending_session_id = None
self._goodbye_sent = False
self._router = self._router_factory.get(realm)
if not self._router:
# should not arrive here
raise Exception(
"logic error (no realm at a stage were we should have one)"
)
self._authid = authid
self._authrole = authrole
self._authmethod = authmethod
self._authprovider = authprovider
self._authextra = authextra or {}
self._authextra["x_cb_node_id"] = self._router_factory._node_id
self._authextra["x_cb_peer"] = str(self._transport.peer)
self._authextra["x_cb_pid"] = os.getpid()
roles = self._router.attach(self)
msg = message.Welcome(
self._session_id,
roles,
realm=realm,
authid=authid,
authrole=authrole,
authmethod=authmethod,
authprovider=authprovider,
authextra=self._authextra,
custom=custom,
)
self._transport.send(msg)
self.onJoin(
SessionDetails(
self._realm,
self._session_id,
self._authid,
self._authrole,
self._authmethod,
self._authprovider,
self._authextra,
)
)
# the first message MUST be HELLO
if isinstance(msg, message.Hello):
self._session_roles = msg.roles
details = types.HelloDetails(
realm=msg.realm,
authmethods=msg.authmethods,
authid=msg.authid,
authrole=msg.authrole,
authextra=msg.authextra,
session_roles=msg.roles,
pending_session=self._pending_session_id,
)
d = txaio.as_future(self.onHello, msg.realm, details)
def success(res):
msg = None
# it is possible this session has disconnected
# while onHello was taking place
if self._transport is None:
self.log.info(
"Client session disconnected during authentication",
)
return
if isinstance(res, types.Accept):
custom = {"x_cb_node_id": self._router_factory._node_id}
welcome(
res.realm,
res.authid,
res.authrole,
res.authmethod,
res.authprovider,
res.authextra,
custom,
)
elif isinstance(res, types.Challenge):
msg = message.Challenge(res.method, res.extra)
elif isinstance(res, types.Deny):
msg = message.Abort(res.reason, res.message)
else:
pass
if msg:
self._transport.send(msg)
txaio.add_callbacks(d, success, self._swallow_error_and_abort)
elif isinstance(msg, message.Authenticate):
d = txaio.as_future(self.onAuthenticate, msg.signature, {})
def success(res):
msg = None
# it is possible this session has disconnected
# while authentication was taking place
if self._transport is None:
self.log.info(
"Client session disconnected during authentication",
)
return
if isinstance(res, types.Accept):
custom = {"x_cb_node_id": self._router_factory._node_id}
welcome(
res.realm,
res.authid,
res.authrole,
res.authmethod,
res.authprovider,
res.authextra,
custom,
)
elif isinstance(res, types.Deny):
msg = message.Abort(res.reason, res.message)
else:
pass
if msg:
self._transport.send(msg)
txaio.add_callbacks(d, success, self._swallow_error_and_abort)
elif isinstance(msg, message.Abort):
# fire callback and close the transport
self.onLeave(types.CloseDetails(msg.reason, msg.message))
self._session_id = None
self._pending_session_id = None
# self._transport.close()
else:
# raise ProtocolError(u"PReceived {0} message while session is not joined".format(msg.__class__))
# self.log.warn('Protocol state error - received {message} while session is not joined')
# swallow all noise like still getting PUBLISH messages from log event forwarding - maybe FIXME
pass
else:
if isinstance(msg, message.Hello):
raise ProtocolError(
"HELLO message received, while session is already established"
)
elif isinstance(msg, message.Goodbye):
if not self._goodbye_sent:
# The peer wants to close: answer with GOODBYE reply.
# Note: We MUST NOT send any WAMP message _after_ GOODBYE
reply = message.Goodbye()
self._transport.send(reply)
self._goodbye_sent = True
else:
# This is the peer's GOODBYE reply to our own earlier GOODBYE
pass
# We need to first detach the session from the router before
# erasing the session ID below ..
try:
self._router.detach(self)
except Exception:
self.log.failure("Internal error")
# In order to send wamp.session.on_leave properly
# (i.e. *with* the proper session_id) we save it
previous_session_id = self._session_id
# At this point, we've either sent GOODBYE already earlier,
# or we have just responded with GOODBYE. In any case, we MUST NOT
# send any WAMP message from now on:
# clear out session ID, so that anything that might be triggered
# in the onLeave below is prohibited from sending WAMP stuff.
# E.g. the client might have been subscribed to meta events like
# wamp.session.on_leave - and we must not send that client's own
# leave to itself!
self._session_id = None
self._pending_session_id = None
# publish event, *after* self._session_id is None so
# that we don't publish to ourselves as well (if this
# session happens to be subscribed to wamp.session.on_leave)
if self._service_session:
self._service_session.publish(
"wamp.session.on_leave",
previous_session_id,
)
# fire callback and close the transport
self.onLeave(types.CloseDetails(msg.reason, msg.message))
# don't close the transport, as WAMP allows to reattach a session
# to the same or a different realm without closing the transport
# self._transport.close()
else:
self._router.process(self, msg)
|
def onMessage(self, msg):
"""
Implements :func:`autobahn.wamp.interfaces.ITransportHandler.onMessage`
"""
if self._session_id is None:
if not self._pending_session_id:
self._pending_session_id = util.id()
def welcome(
realm,
authid=None,
authrole=None,
authmethod=None,
authprovider=None,
authextra=None,
custom=None,
):
self._realm = realm
self._session_id = self._pending_session_id
self._pending_session_id = None
self._goodbye_sent = False
self._router = self._router_factory.get(realm)
if not self._router:
# should not arrive here
raise Exception(
"logic error (no realm at a stage were we should have one)"
)
self._authid = authid
self._authrole = authrole
self._authmethod = authmethod
self._authprovider = authprovider
self._authextra = authextra or {}
self._authextra["x_cb_node_id"] = self._router_factory._node_id
self._authextra["x_cb_peer"] = str(self._transport.peer)
self._authextra["x_cb_pid"] = os.getpid()
roles = self._router.attach(self)
msg = message.Welcome(
self._session_id,
roles,
realm=realm,
authid=authid,
authrole=authrole,
authmethod=authmethod,
authprovider=authprovider,
authextra=self._authextra,
custom=custom,
)
self._transport.send(msg)
self.onJoin(
SessionDetails(
self._realm,
self._session_id,
self._authid,
self._authrole,
self._authmethod,
self._authprovider,
self._authextra,
)
)
# the first message MUST be HELLO
if isinstance(msg, message.Hello):
self._session_roles = msg.roles
details = types.HelloDetails(
realm=msg.realm,
authmethods=msg.authmethods,
authid=msg.authid,
authrole=msg.authrole,
authextra=msg.authextra,
session_roles=msg.roles,
pending_session=self._pending_session_id,
)
d = txaio.as_future(self.onHello, msg.realm, details)
def success(res):
msg = None
if isinstance(res, types.Accept):
custom = {"x_cb_node_id": self._router_factory._node_id}
welcome(
res.realm,
res.authid,
res.authrole,
res.authmethod,
res.authprovider,
res.authextra,
custom,
)
elif isinstance(res, types.Challenge):
msg = message.Challenge(res.method, res.extra)
elif isinstance(res, types.Deny):
msg = message.Abort(res.reason, res.message)
else:
pass
if msg:
self._transport.send(msg)
txaio.add_callbacks(d, success, self._swallow_error_and_abort)
elif isinstance(msg, message.Authenticate):
d = txaio.as_future(self.onAuthenticate, msg.signature, {})
def success(res):
msg = None
if isinstance(res, types.Accept):
custom = {"x_cb_node_id": self._router_factory._node_id}
welcome(
res.realm,
res.authid,
res.authrole,
res.authmethod,
res.authprovider,
res.authextra,
custom,
)
elif isinstance(res, types.Deny):
msg = message.Abort(res.reason, res.message)
else:
pass
if msg:
self._transport.send(msg)
txaio.add_callbacks(d, success, self._swallow_error_and_abort)
elif isinstance(msg, message.Abort):
# fire callback and close the transport
self.onLeave(types.CloseDetails(msg.reason, msg.message))
self._session_id = None
self._pending_session_id = None
# self._transport.close()
else:
# raise ProtocolError(u"PReceived {0} message while session is not joined".format(msg.__class__))
# self.log.warn('Protocol state error - received {message} while session is not joined')
# swallow all noise like still getting PUBLISH messages from log event forwarding - maybe FIXME
pass
else:
if isinstance(msg, message.Hello):
raise ProtocolError(
"HELLO message received, while session is already established"
)
elif isinstance(msg, message.Goodbye):
if not self._goodbye_sent:
# The peer wants to close: answer with GOODBYE reply.
# Note: We MUST NOT send any WAMP message _after_ GOODBYE
reply = message.Goodbye()
self._transport.send(reply)
self._goodbye_sent = True
else:
# This is the peer's GOODBYE reply to our own earlier GOODBYE
pass
# We need to first detach the session from the router before
# erasing the session ID below ..
try:
self._router.detach(self)
except Exception:
self.log.failure("Internal error")
# In order to send wamp.session.on_leave properly
# (i.e. *with* the proper session_id) we save it
previous_session_id = self._session_id
# At this point, we've either sent GOODBYE already earlier,
# or we have just responded with GOODBYE. In any case, we MUST NOT
# send any WAMP message from now on:
# clear out session ID, so that anything that might be triggered
# in the onLeave below is prohibited from sending WAMP stuff.
# E.g. the client might have been subscribed to meta events like
# wamp.session.on_leave - and we must not send that client's own
# leave to itself!
self._session_id = None
self._pending_session_id = None
# publish event, *after* self._session_id is None so
# that we don't publish to ourselves as well (if this
# session happens to be subscribed to wamp.session.on_leave)
if self._service_session:
self._service_session.publish(
"wamp.session.on_leave",
previous_session_id,
)
# fire callback and close the transport
self.onLeave(types.CloseDetails(msg.reason, msg.message))
# don't close the transport, as WAMP allows to reattach a session
# to the same or a different realm without closing the transport
# self._transport.close()
else:
self._router.process(self, msg)
|
https://github.com/crossbario/crossbar/issues/1576
|
Unhandled error in Deferred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/autobahn/wamp/protocol.py", line 888, in onMessage
txaio.resolve(on_reply, msg.args[0])
File "/usr/local/lib/python3.6/dist-packages/txaio/tx.py", line 468, in resolve
future.callback(result)
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 460, in callback
self._startRunCallbacks(result)
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks
self._runCallbacks()
--- <exception caught here> ---
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 654, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "/usr/local/lib/python3.6/dist-packages/crossbar/router/dealer.py", line 441, in on_authorize_success
self._session_to_registrations[session].add(registration)
builtins.KeyError: <crossbar.router.session.RouterSession object at 0x7f653abfaef0>
|
builtins.KeyError
|
def success(res):
msg = None
# it is possible this session has disconnected
# while authentication was taking place
if self._transport is None:
self.log.info(
"Client session disconnected during authentication",
)
return
if isinstance(res, types.Accept):
custom = {"x_cb_node_id": self._router_factory._node_id}
welcome(
res.realm,
res.authid,
res.authrole,
res.authmethod,
res.authprovider,
res.authextra,
custom,
)
elif isinstance(res, types.Deny):
msg = message.Abort(res.reason, res.message)
else:
pass
if msg:
self._transport.send(msg)
|
def success(res):
msg = None
if isinstance(res, types.Accept):
custom = {"x_cb_node_id": self._router_factory._node_id}
welcome(
res.realm,
res.authid,
res.authrole,
res.authmethod,
res.authprovider,
res.authextra,
custom,
)
elif isinstance(res, types.Deny):
msg = message.Abort(res.reason, res.message)
else:
pass
if msg:
self._transport.send(msg)
|
https://github.com/crossbario/crossbar/issues/1576
|
Unhandled error in Deferred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/autobahn/wamp/protocol.py", line 888, in onMessage
txaio.resolve(on_reply, msg.args[0])
File "/usr/local/lib/python3.6/dist-packages/txaio/tx.py", line 468, in resolve
future.callback(result)
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 460, in callback
self._startRunCallbacks(result)
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks
self._runCallbacks()
--- <exception caught here> ---
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 654, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "/usr/local/lib/python3.6/dist-packages/crossbar/router/dealer.py", line 441, in on_authorize_success
self._session_to_registrations[session].add(registration)
builtins.KeyError: <crossbar.router.session.RouterSession object at 0x7f653abfaef0>
|
builtins.KeyError
|
def create(transport, path, config):
personality = transport.worker.personality
personality.WEB_SERVICE_CHECKERS["publisher"](personality, config)
# create a vanilla session: the publisher will use this to inject events
#
publisher_session_config = ComponentConfig(realm=config["realm"], extra=None)
publisher_session = ApplicationSession(publisher_session_config)
# add the publisher session to the router
#
router = transport._worker._router_session_factory._routerFactory._routers[
config["realm"]
]
transport._worker._router_session_factory.add(
publisher_session, router, authrole=config.get("role", "anonymous")
)
# now create the publisher Twisted Web resource
#
resource = PublisherResource(config.get("options", {}), publisher_session)
return RouterWebServiceRestPublisher(transport, path, config, resource)
|
def create(transport, path, config):
personality = transport.worker.personality
personality.WEB_SERVICE_CHECKERS["publisher"](personality, config)
# create a vanilla session: the publisher will use this to inject events
#
publisher_session_config = ComponentConfig(realm=config["realm"], extra=None)
publisher_session = ApplicationSession(publisher_session_config)
# add the publisher session to the router
#
transport._worker._router_session_factory.add(
publisher_session, authrole=config.get("role", "anonymous")
)
# now create the publisher Twisted Web resource
#
resource = PublisherResource(config.get("options", {}), publisher_session)
return RouterWebServiceRestPublisher(transport, path, config, resource)
|
https://github.com/crossbario/crossbar/issues/1590
|
2019-05-18T14:50:35+0000 [Router 18] Starting "publisher" Web service on path "pub" of transport "transport001" <crossbar.worker.router.RouterController.start_web_transport_service>
2019-05-18T14:50:35+0000 [Router 18] RouterController.onUserError(): "TypeError: add() missing 1 required positional argument: 'router'"
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/twisted/internet/defer.py", line 1613, in unwindGenerator
return _cancellableInlineCallbacks(gen)
File "/usr/local/lib/python3.7/site-packages/twisted/internet/defer.py", line 1529, in _cancellableInlineCallbacks
_inlineCallbacks(None, g, status)
File "/usr/local/lib/python3.7/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks
result = g.send(result)
File "/usr/local/lib/python3.7/site-packages/crossbar/worker/router.py", line 844, in start_web_transport_service
webservice = yield maybeDeferred(webservice_factory.create, transport, path, config)
--- <exception caught here> ---
File "/usr/local/lib/python3.7/site-packages/crossbar/worker/router.py", line 844, in start_web_transport_service
webservice = yield maybeDeferred(webservice_factory.create, transport, path, config)
File "/usr/local/lib/python3.7/site-packages/twisted/internet/defer.py", line 151, in maybeDeferred
result = f(*args, **kw)
File "/usr/local/lib/python3.7/site-packages/crossbar/webservice/rest.py", line 59, in create
authrole=config.get('role', 'anonymous'))
builtins.TypeError: add() missing 1 required positional argument: 'router'
2019-05-18T14:50:35+0000 [Controller 1] Could not start node: Traceback (most recent call last):
--- <exception caught here> ---
File "/usr/local/lib/python3.7/site-packages/crossbar/node/node.py", line 375, in start
yield self.personality.Node.boot(self)
File "/usr/local/lib/python3.7/site-packages/crossbar/node/node.py", line 498, in boot_from_config
yield d
File "/usr/local/lib/python3.7/site-packages/crossbar/node/node.py", line 475, in configure_worker
yield config_fn(worker_logname, worker_id, worker)
File "/usr/local/lib/python3.7/site-packages/crossbar/node/node.py", line 661, in _configure_native_worker_router
options=CallOptions())
autobahn.wamp.exception.ApplicationError: ApplicationError(error=<wamp.error.runtime_error>, args=["add() missing 1 required positional argument: 'router'"], kwargs={}, enc_algo=None, callee=None, callee_authid=$
one, callee_authrole=None, forward_for=None)
|
builtins.TypeError
|
def create(transport, path, config):
personality = transport.worker.personality
personality.WEB_SERVICE_CHECKERS["caller"](personality, config)
# create a vanilla session: the caller will use this to inject calls
#
caller_session_config = ComponentConfig(realm=config["realm"], extra=None)
caller_session = ApplicationSession(caller_session_config)
# add the calling session to the router
#
router = transport._worker._router_session_factory._routerFactory._routers[
config["realm"]
]
transport._worker._router_session_factory.add(
caller_session, router, authrole=config.get("role", "anonymous")
)
# now create the caller Twisted Web resource
#
resource = CallerResource(config.get("options", {}), caller_session)
return RouterWebServiceRestCaller(transport, path, config, resource)
|
def create(transport, path, config):
personality = transport.worker.personality
personality.WEB_SERVICE_CHECKERS["caller"](personality, config)
# create a vanilla session: the caller will use this to inject calls
#
caller_session_config = ComponentConfig(realm=config["realm"], extra=None)
caller_session = ApplicationSession(caller_session_config)
# add the calling session to the router
#
transport._worker._router_session_factory.add(
caller_session, authrole=config.get("role", "anonymous")
)
# now create the caller Twisted Web resource
#
resource = CallerResource(config.get("options", {}), caller_session)
return RouterWebServiceRestCaller(transport, path, config, resource)
|
https://github.com/crossbario/crossbar/issues/1590
|
2019-05-18T14:50:35+0000 [Router 18] Starting "publisher" Web service on path "pub" of transport "transport001" <crossbar.worker.router.RouterController.start_web_transport_service>
2019-05-18T14:50:35+0000 [Router 18] RouterController.onUserError(): "TypeError: add() missing 1 required positional argument: 'router'"
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/twisted/internet/defer.py", line 1613, in unwindGenerator
return _cancellableInlineCallbacks(gen)
File "/usr/local/lib/python3.7/site-packages/twisted/internet/defer.py", line 1529, in _cancellableInlineCallbacks
_inlineCallbacks(None, g, status)
File "/usr/local/lib/python3.7/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks
result = g.send(result)
File "/usr/local/lib/python3.7/site-packages/crossbar/worker/router.py", line 844, in start_web_transport_service
webservice = yield maybeDeferred(webservice_factory.create, transport, path, config)
--- <exception caught here> ---
File "/usr/local/lib/python3.7/site-packages/crossbar/worker/router.py", line 844, in start_web_transport_service
webservice = yield maybeDeferred(webservice_factory.create, transport, path, config)
File "/usr/local/lib/python3.7/site-packages/twisted/internet/defer.py", line 151, in maybeDeferred
result = f(*args, **kw)
File "/usr/local/lib/python3.7/site-packages/crossbar/webservice/rest.py", line 59, in create
authrole=config.get('role', 'anonymous'))
builtins.TypeError: add() missing 1 required positional argument: 'router'
2019-05-18T14:50:35+0000 [Controller 1] Could not start node: Traceback (most recent call last):
--- <exception caught here> ---
File "/usr/local/lib/python3.7/site-packages/crossbar/node/node.py", line 375, in start
yield self.personality.Node.boot(self)
File "/usr/local/lib/python3.7/site-packages/crossbar/node/node.py", line 498, in boot_from_config
yield d
File "/usr/local/lib/python3.7/site-packages/crossbar/node/node.py", line 475, in configure_worker
yield config_fn(worker_logname, worker_id, worker)
File "/usr/local/lib/python3.7/site-packages/crossbar/node/node.py", line 661, in _configure_native_worker_router
options=CallOptions())
autobahn.wamp.exception.ApplicationError: ApplicationError(error=<wamp.error.runtime_error>, args=["add() missing 1 required positional argument: 'router'"], kwargs={}, enc_algo=None, callee=None, callee_authid=$
one, callee_authrole=None, forward_for=None)
|
builtins.TypeError
|
def create(transport, path, config):
    """Create a webhook web service on *transport* at *path*.

    Validates *config* against the personality's "webhook" checker, attaches
    a plain ApplicationSession (through which received webhooks publish WAMP
    events) to the router serving the configured realm, and wraps the
    Twisted Web resource that accepts the HTTP POSTs.
    """
    checker_personality = transport.worker.personality
    checker_personality.WEB_SERVICE_CHECKERS["webhook"](checker_personality, config)

    # vanilla session used by the webhook resource to inject events
    session = ApplicationSession(
        ComponentConfig(realm=config["realm"], extra=None)
    )

    # attach the session to the router for the configured realm;
    # RouterSessionFactory.add() requires the router as second argument
    session_factory = transport._worker._router_session_factory
    realm_router = session_factory._routerFactory._routers[config["realm"]]
    session_factory.add(
        session, realm_router, authrole=config.get("role", "anonymous")
    )

    # the Twisted Web resource receiving the incoming webhook requests
    web_resource = WebhookResource(config.get("options", {}), session)
    return RouterWebServiceWebhook(transport, path, config, web_resource)
|
def create(transport, path, config):
    """Create a webhook web service on *transport* at *path*.

    Validates *config* against the personality's "webhook" checker, attaches
    a plain ApplicationSession (through which received webhooks publish WAMP
    events) to the router serving the configured realm, and wraps the
    Twisted Web resource that accepts the HTTP POSTs.
    """
    personality = transport.worker.personality
    personality.WEB_SERVICE_CHECKERS["webhook"](personality, config)
    # create a vanilla session: the webhook will use this to inject events
    #
    webhook_session_config = ComponentConfig(realm=config["realm"], extra=None)
    webhook_session = ApplicationSession(webhook_session_config)
    # add the webhook session to the router
    #
    # FIX: RouterSessionFactory.add() takes the target router as a required
    # positional argument; omitting it raised
    # "TypeError: add() missing 1 required positional argument: 'router'"
    # (crossbario/crossbar#1590). Look up the router for the configured realm
    # and pass it through.
    router = transport._worker._router_session_factory._routerFactory._routers[
        config["realm"]
    ]
    transport._worker._router_session_factory.add(
        webhook_session, router, authrole=config.get("role", "anonymous")
    )
    # now create the webhook Twisted Web resource
    #
    resource = WebhookResource(config.get("options", {}), webhook_session)
    return RouterWebServiceWebhook(transport, path, config, resource)
|
https://github.com/crossbario/crossbar/issues/1590
|
2019-05-18T14:50:35+0000 [Router 18] Starting "publisher" Web service on path "pub" of transport "transport001" <crossbar.worker.router.RouterController.start_web_transport_service>
2019-05-18T14:50:35+0000 [Router 18] RouterController.onUserError(): "TypeError: add() missing 1 required positional argument: 'router'"
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/twisted/internet/defer.py", line 1613, in unwindGenerator
return _cancellableInlineCallbacks(gen)
File "/usr/local/lib/python3.7/site-packages/twisted/internet/defer.py", line 1529, in _cancellableInlineCallbacks
_inlineCallbacks(None, g, status)
File "/usr/local/lib/python3.7/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks
result = g.send(result)
File "/usr/local/lib/python3.7/site-packages/crossbar/worker/router.py", line 844, in start_web_transport_service
webservice = yield maybeDeferred(webservice_factory.create, transport, path, config)
--- <exception caught here> ---
File "/usr/local/lib/python3.7/site-packages/crossbar/worker/router.py", line 844, in start_web_transport_service
webservice = yield maybeDeferred(webservice_factory.create, transport, path, config)
File "/usr/local/lib/python3.7/site-packages/twisted/internet/defer.py", line 151, in maybeDeferred
result = f(*args, **kw)
File "/usr/local/lib/python3.7/site-packages/crossbar/webservice/rest.py", line 59, in create
authrole=config.get('role', 'anonymous'))
builtins.TypeError: add() missing 1 required positional argument: 'router'
2019-05-18T14:50:35+0000 [Controller 1] Could not start node: Traceback (most recent call last):
--- <exception caught here> ---
File "/usr/local/lib/python3.7/site-packages/crossbar/node/node.py", line 375, in start
yield self.personality.Node.boot(self)
File "/usr/local/lib/python3.7/site-packages/crossbar/node/node.py", line 498, in boot_from_config
yield d
File "/usr/local/lib/python3.7/site-packages/crossbar/node/node.py", line 475, in configure_worker
yield config_fn(worker_logname, worker_id, worker)
File "/usr/local/lib/python3.7/site-packages/crossbar/node/node.py", line 661, in _configure_native_worker_router
options=CallOptions())
autobahn.wamp.exception.ApplicationError: ApplicationError(error=<wamp.error.runtime_error>, args=["add() missing 1 required positional argument: 'router'"], kwargs={}, enc_algo=None, callee=None, callee_authid=None, callee_authrole=None, forward_for=None)
|
builtins.TypeError
|
def start(self):
    """
    Starts this node. This will start a node controller and then spawn new worker
    processes as needed.

    Runs as an inlineCallbacks-style generator: the ``yield`` suspends until
    the personality startup Deferred fires. Raises if no configuration was
    set; on startup failure it logs and stops the reactor instead of
    propagating.
    """
    if not self._config:
        raise Exception("No node configuration set")
    # get controller config/options
    #
    controller_config = self._config.get("controller", {})
    controller_options = controller_config.get("options", {})
    # set controller process title (best-effort: setproctitle is optional)
    #
    try:
        import setproctitle
    except ImportError:
        self.log.warn(
            "Warning, could not set process title (setproctitle not installed)"
        )
    else:
        setproctitle.setproctitle(
            controller_options.get("title", "crossbar-controller")
        )
    # local node management router
    #
    self._router_factory = RouterFactory(None)
    self._router_session_factory = RouterSessionFactory(self._router_factory)
    rlm_config = {"name": self._realm}
    rlm = RouterRealm(None, rlm_config)
    router = self._router_factory.start_realm(rlm)
    # setup global static roles
    #
    self._add_global_roles()
    # always add a realm service session
    #
    cfg = ComponentConfig(self._realm)
    rlm.session = (self.ROUTER_SERVICE)(cfg, router)
    self._router_session_factory.add(rlm.session, authrole="trusted")
    self.log.debug(
        "Router service session attached [{router_service}]",
        router_service=qual(self.ROUTER_SERVICE),
    )
    # add the node controller singleton component
    #
    self._controller = self.NODE_CONTROLLER(self)
    self._router_session_factory.add(self._controller, authrole="trusted")
    self.log.debug(
        "Node controller attached [{node_controller}]",
        node_controller=qual(self.NODE_CONTROLLER),
    )
    # add extra node controller components
    #
    self._add_extra_controller_components(controller_options)
    # setup Node shutdown triggers
    #
    self._set_shutdown_triggers(controller_options)
    panic = False
    try:
        # startup the node personality ..
        yield self._startup()
        # .. and notify systemd that we are fully up and running
        try:
            import sdnotify
            sdnotify.SystemdNotifier().notify("READY=1")
        except Exception:
            # FIX: narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit are no longer swallowed; still best-effort on
            # non-systemd platforms (sdnotify missing or notify failing)
            pass
    except ApplicationError as e:
        panic = True
        self.log.error("{msg}", msg=e.error_message())
    except Exception:
        panic = True
        self.log.failure()
        self.log.error("fatal: could not startup node")
    if panic:
        try:
            self._reactor.stop()
        except twisted.internet.error.ReactorNotRunning:
            pass
|
def start(self):
    """
    Starts this node. This will start a node controller and then spawn new worker
    processes as needed.

    Runs as an inlineCallbacks-style generator: the ``yield`` suspends until
    the personality startup Deferred fires. Raises if no configuration was
    set; on startup failure it logs and stops the reactor instead of
    propagating.
    """
    if not self._config:
        raise Exception("No node configuration set")
    # get controller config/options
    #
    controller_config = self._config.get("controller", {})
    controller_options = controller_config.get("options", {})
    # set controller process title (best-effort: setproctitle is optional)
    #
    try:
        import setproctitle
    except ImportError:
        self.log.warn(
            "Warning, could not set process title (setproctitle not installed)"
        )
    else:
        setproctitle.setproctitle(
            controller_options.get("title", "crossbar-controller")
        )
    # local node management router: router factory first, then the session
    # factory that attaches sessions to it
    #
    self._router_factory = RouterFactory(None)
    self._router_session_factory = RouterSessionFactory(self._router_factory)
    rlm_config = {"name": self._realm}
    rlm = RouterRealm(None, rlm_config)
    router = self._router_factory.start_realm(rlm)
    # setup global static roles
    #
    self._add_global_roles()
    # always add a realm service session
    #
    cfg = ComponentConfig(self._realm)
    rlm.session = (self.ROUTER_SERVICE)(cfg, router)
    self._router_session_factory.add(rlm.session, authrole="trusted")
    self.log.debug(
        "Router service session attached [{router_service}]",
        router_service=qual(self.ROUTER_SERVICE),
    )
    # add the node controller singleton component
    #
    self._controller = self.NODE_CONTROLLER(self)
    self._router_session_factory.add(self._controller, authrole="trusted")
    self.log.debug(
        "Node controller attached [{node_controller}]",
        node_controller=qual(self.NODE_CONTROLLER),
    )
    # add extra node controller components
    #
    self._add_extra_controller_components(controller_options)
    # setup Node shutdown triggers
    #
    self._set_shutdown_triggers(controller_options)
    panic = False
    try:
        # startup the node personality ..
        yield self._startup()
        # .. and notify systemd that we are fully up and running
        try:
            import sdnotify

            sdnotify.SystemdNotifier().notify("READY=1")
        except:
            # do nothing on non-systemd platforms
            pass
    except ApplicationError as e:
        panic = True
        self.log.error("{msg}", msg=e.error_message())
    except Exception:
        panic = True
        # log the full failure traceback before shutting the reactor down
        self.log.failure("Could not startup node: {log_failure.value}")
    if panic:
        try:
            self._reactor.stop()
        except twisted.internet.error.ReactorNotRunning:
            # reactor already stopped (or never started) — nothing to do
            pass
|
https://github.com/crossbario/crossbar/issues/1179
|
2017-09-05T14:52:34+0200 [Controller 15960] Starting 2 workers ...
2017-09-05T14:52:34+0200 [Controller 15960] Router worker "worker-001" starting ..
2017-09-05T14:52:34+0200 [Router 15969] Started Router worker "worker-001" [crossbar.worker.router.RouterWorkerSession / CPython-EPollReactor]
2017-09-05T14:52:34+0200 [Router 15969] Router worker "worker-001" session 3303279962187294 initializing ..
2017-09-05T14:52:34+0200 [Router 15969] Registered 35 procedures
2017-09-05T14:52:34+0200 [Router 15969] Router worker "worker-001" session ready
2017-09-05T14:52:34+0200 [Controller 15960] Router worker "worker-001" process 15969 started
2017-09-05T14:52:34+0200 [Router 15969] RouterServiceSession ready [configured on_ready fired]
2017-09-05T14:52:34+0200 [Router 15969] Realm 'realm1' started
2017-09-05T14:52:34+0200 [Controller 15960] Router 'worker-001': realm 'realm-001' (named 'realm1') started
2017-09-05T14:52:34+0200 [Router 15969] role role-001 on realm realm-001 started
2017-09-05T14:52:34+0200 [Controller 15960] Router 'worker-001': role 'role-001' (named 'authenticator') started on realm 'realm-001'
2017-09-05T14:52:34+0200 [Router 15969] role role-002 on realm realm-001 started
2017-09-05T14:52:34+0200 [Controller 15960] Router 'worker-001': role 'role-002' (named 'public') started on realm 'realm-001'
2017-09-05T14:52:34+0200 [Router 15969] started component: labgrid.remote.authenticator.AuthenticatorSession id=1175882440106437
2017-09-05T14:52:34+0200 [Controller 15960] Router 'worker-001': component 'component-001' started
2017-09-05T14:52:34+0200 [Router 15969] Site starting on 20408
2017-09-05T14:52:34+0200 [Controller 15960] Router 'worker-001': transport 'transport-001' started
2017-09-05T14:52:34+0200 [Controller 15960] Could not startup node: Traceback (most recent call last):
File "/home/phoenix/.virtualenvs/labgrid/lib/python3.6/site-packages/twisted/internet/defer.py", line 653, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "/home/phoenix/.virtualenvs/labgrid/lib/python3.6/site-packages/twisted/internet/defer.py", line 1442, in gotResult
_inlineCallbacks(r, g, deferred)
File "/home/phoenix/.virtualenvs/labgrid/lib/python3.6/site-packages/twisted/internet/defer.py", line 1384, in _inlineCallbacks
result = result.throwExceptionIntoGenerator(g)
File "/home/phoenix/.virtualenvs/labgrid/lib/python3.6/site-packages/twisted/python/failure.py", line 393, in throwExceptionIntoGenerator
return g.throw(self.type, self.value, self.tb)
--- <exception caught here> ---
File "/home/phoenix/.virtualenvs/labgrid/lib/python3.6/site-packages/crossbar/controller/node.py", line 597, in start
yield self._startup()
File "/home/phoenix/.virtualenvs/labgrid/lib/python3.6/site-packages/twisted/internet/defer.py", line 1386, in _inlineCallbacks
result = g.send(result)
File "/home/phoenix/.virtualenvs/labgrid/lib/python3.6/site-packages/crossbar/controller/node.py", line 656, in _configure_node_from_config
assert worker_type in self._native_workers
builtins.AssertionError:
|
builtins.AssertionError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.