after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def raise_uncaught_exception(self, exc):
if settings.DEBUG:
request = self.request
renderer_format = getattr(request.accepted_renderer, "format")
use_plaintext_traceback = renderer_format not in ("html", "api", "admin")
request.force_plaintext_errors(use_plaintext_traceback)
raise
|
def raise_uncaught_exception(self, exc):
if settings.DEBUG:
request = self.request
renderer_format = getattr(request.accepted_renderer, "format")
use_plaintext_traceback = renderer_format not in ("html", "api", "admin")
request.force_plaintext_errors(use_plaintext_traceback)
raise exc
|
https://github.com/encode/django-rest-framework/issues/4631
|
Traceback (most recent call last):
File "/Users/coagulant/projects/myproject/project/tests/api/account_tests.py", line 346, in test_put_account_detail_restricted_fields_200
{'email': u'some.unconfirmed@email.com'})
File "/Users/coagulant/.envs/myproject/lib/python2.7/site-packages/rest_framework/test.py", line 307, in put
path, data=data, format=format, content_type=content_type, **extra)
File "/Users/coagulant/.envs/myproject/lib/python2.7/site-packages/rest_framework/test.py", line 225, in put
return self.generic('PUT', path, data, content_type, **extra)
File "/Users/coagulant/.envs/myproject/lib/python2.7/site-packages/django/test/client.py", line 380, in generic
return self.request(**r)
File "/Users/coagulant/projects/myproject/project/tests/fixtures/clients.py", line 27, in request
return super(DRFAPIClient, self).request(**kwargs)
File "/Users/coagulant/projects/myproject/project/tests/fixtures/clients.py", line 17, in request
return super(AutoPrependBasePathMixin, self).request(**kwargs)
File "/Users/coagulant/.envs/myproject/lib/python2.7/site-packages/rest_framework/test.py", line 288, in request
return super(APIClient, self).request(**kwargs)
File "/Users/coagulant/.envs/myproject/lib/python2.7/site-packages/rest_framework/test.py", line 240, in request
request = super(APIRequestFactory, self).request(**kwargs)
File "/Users/coagulant/.envs/myproject/lib/python2.7/site-packages/django/test/client.py", line 449, in request
response = self.handler(environ)
File "/Users/coagulant/.envs/myproject/lib/python2.7/site-packages/django/test/client.py", line 123, in __call__
response = self.get_response(request)
File "/Users/coagulant/.envs/myproject/lib/python2.7/site-packages/rest_framework/test.py", line 260, in get_response
return super(ForceAuthClientHandler, self).get_response(request)
File "/Users/coagulant/.envs/myproject/lib/python2.7/site-packages/django/core/handlers/base.py", line 230, in get_response
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
File "/Users/coagulant/.envs/myproject/lib/python2.7/site-packages/django/core/handlers/base.py", line 149, in get_response
response = self.process_exception_by_middleware(e, request)
File "/Users/coagulant/.envs/myproject/lib/python2.7/site-packages/django/core/handlers/base.py", line 147, in get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/Users/coagulant/.envs/myproject/lib/python2.7/site-packages/django/views/decorators/csrf.py", line 58, in wrapped_view
return view_func(*args, **kwargs)
File "/Users/coagulant/.envs/myproject/lib/python2.7/site-packages/rest_framework/viewsets.py", line 83, in view
return self.dispatch(request, *args, **kwargs)
File "/Users/coagulant/.envs/myproject/lib/python2.7/site-packages/rest_framework/views.py", line 477, in dispatch
response = self.handle_exception(exc)
File "/Users/coagulant/.envs/myproject/lib/python2.7/site-packages/rest_framework/views.py", line 437, in handle_exception
self.raise_uncaught_exception(exc)
File "/Users/coagulant/.envs/myproject/lib/python2.7/site-packages/rest_framework/views.py", line 474, in dispatch
response = handler(request, *args, **kwargs)
File "/Users/coagulant/.envs/myproject/lib/python2.7/site-packages/rest_framework/mixins.py", line 78, in update
return Response(serializer.data)
File "/Users/coagulant/.envs/myproject/lib/python2.7/site-packages/rest_framework/serializers.py", line 504, in data
ret = super(Serializer, self).data
File "/Users/coagulant/.envs/myproject/lib/python2.7/site-packages/rest_framework/serializers.py", line 239, in data
self._data = self.to_representation(self.instance)
File "/Users/coagulant/.envs/myproject/lib/python2.7/site-packages/rest_framework/serializers.py", line 473, in to_representation
ret[field.field_name] = field.to_representation(attribute)
File "/Users/coagulant/.envs/myproject/lib/python2.7/site-packages/rest_framework/relations.py", line 371, in to_representation
url = self.get_url(value, self.view_name, request, format)
File "/Users/coagulant/projects/myproject/project/api/serializers.py", line 19, in get_url
version = kwargs['request'].version
KeyError: 'request'
|
KeyError
|
def get_serializer_fields(self, path, method, callback, view):
"""
Return a list of `coreapi.Field` instances corresponding to any
request body input, as determined by the serializer class.
"""
if method not in ("PUT", "PATCH", "POST"):
return []
fields = []
if not (
hasattr(view, "get_serializer_class")
and callable(getattr(view, "get_serializer_class"))
):
return []
serializer_class = view.get_serializer_class()
serializer = serializer_class()
if isinstance(serializer, serializers.ListSerializer):
return coreapi.Field(name="data", location="body", required=True)
if not isinstance(serializer, serializers.Serializer):
return []
for field in serializer.fields.values():
if field.read_only:
continue
required = field.required and method != "PATCH"
field = coreapi.Field(name=field.source, location="form", required=required)
fields.append(field)
return fields
|
def get_serializer_fields(self, path, method, callback, view):
"""
Return a list of `coreapi.Field` instances corresponding to any
request body input, as determined by the serializer class.
"""
if method not in ("PUT", "PATCH", "POST"):
return []
fields = []
serializer_class = view.get_serializer_class()
serializer = serializer_class()
if isinstance(serializer, serializers.ListSerializer):
return coreapi.Field(name="data", location="body", required=True)
if not isinstance(serializer, serializers.Serializer):
return []
for field in serializer.fields.values():
if field.read_only:
continue
required = field.required and method != "PATCH"
field = coreapi.Field(name=field.source, location="form", required=required)
fields.append(field)
return fields
|
https://github.com/encode/django-rest-framework/issues/4265
|
Traceback (most recent call last):
File "/home/ashish/Env/backend/lib/python3.4/site-packages/django/core/handlers/base.py", line 149, in get_response
response = self.process_exception_by_middleware(e, request)
File "/home/ashish/Env/backend/lib/python3.4/site-packages/django/core/handlers/base.py", line 147, in get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/home/ashish/Env/backend/lib/python3.4/site-packages/django/views/decorators/csrf.py", line 58, in wrapped_view
return view_func(*args, **kwargs)
File "/home/ashish/Env/backend/lib/python3.4/site-packages/django/views/generic/base.py", line 68, in view
return self.dispatch(request, *args, **kwargs)
File "/home/ashish/Env/backend/lib/python3.4/site-packages/rest_framework/views.py", line 466, in dispatch
response = self.handle_exception(exc)
File "/home/ashish/Env/backend/lib/python3.4/site-packages/rest_framework/views.py", line 463, in dispatch
response = handler(request, *args, **kwargs)
File "/home/ashish/Env/backend/lib/python3.4/site-packages/rest_framework/decorators.py", line 52, in handler
return func(*args, **kwargs)
File "/home/ashish/Projects/backend/oyster/config/swagger.py", line 7, in schema_view
generator = schemas.SchemaGenerator(title='Bookings API')
File "/home/ashish/Env/backend/lib/python3.4/site-packages/rest_framework/schemas.py", line 74, in __init__
self.endpoints = self.get_api_endpoints(patterns)
File "/home/ashish/Env/backend/lib/python3.4/site-packages/rest_framework/schemas.py", line 128, in get_api_endpoints
prefix=path_regex
File "/home/ashish/Env/backend/lib/python3.4/site-packages/rest_framework/schemas.py", line 121, in get_api_endpoints
link = self.get_link(path, method, callback)
File "/home/ashish/Env/backend/lib/python3.4/site-packages/rest_framework/schemas.py", line 196, in get_link
fields += self.get_serializer_fields(path, method, callback, view)
File "/home/ashish/Env/backend/lib/python3.4/site-packages/rest_framework/schemas.py", line 256, in get_serializer_fields
serializer_class = view.get_serializer_class()
AttributeError: 'LogoutView' object has no attribute 'get_serializer_class'
|
AttributeError
|
def as_form_field(self):
if self.value is None:
return ""
values = {}
for key, value in self.value.items():
if isinstance(value, (list, dict)):
values[key] = value
else:
values[key] = "" if value is None else force_text(value)
return self.__class__(self._field, values, self.errors, self._prefix)
|
def as_form_field(self):
values = {}
for key, value in self.value.items():
if isinstance(value, (list, dict)):
values[key] = value
else:
values[key] = "" if value is None else force_text(value)
return self.__class__(self._field, values, self.errors, self._prefix)
|
https://github.com/encode/django-rest-framework/issues/3260
|
Traceback (most recent call last):
File "<path_to_virtualenv>/lib/python2.7/site-packages/django/core/handlers/base.py", line 164, in get_response
response = response.render()
File "<path_to_virtualenv>/lib/python2.7/site-packages/django/template/response.py", line 158, in render
self.content = self.rendered_content
File "<path_to_virtualenv>/lib/python2.7/site-packages/rest_framework/response.py", line 60, in rendered_content
ret = renderer.render(self.data, media_type, context)
File "<path_to_virtualenv>/lib/python2.7/site-packages/rest_framework/renderers.py", line 669, in render
context = self.get_context(data, accepted_media_type, renderer_context)
File "<path_to_virtualenv>/lib/python2.7/site-packages/rest_framework/renderers.py", line 646, in get_context
'post_form': self.get_rendered_html_form(data, view, 'POST', request),
File "<path_to_virtualenv>/lib/python2.7/site-packages/rest_framework/renderers.py", line 513, in get_rendered_html_form
[('template', 'rest_framework/api_form.html')]
File "<path_to_virtualenv>/lib/python2.7/site-packages/rest_framework/renderers.py", line 367, in render
return template.render(context)
File "<path_to_virtualenv>/lib/python2.7/site-packages/django/template/backends/django.py", line 74, in render
return self.template.render(context)
File "<path_to_virtualenv>/lib/python2.7/site-packages/django/template/base.py", line 209, in render
return self._render(context)
File "<path_to_virtualenv>/lib/python2.7/site-packages/django/template/base.py", line 201, in _render
return self.nodelist.render(context)
File "<path_to_virtualenv>/lib/python2.7/site-packages/django/template/base.py", line 903, in render
bit = self.render_node(node, context)
File "<path_to_virtualenv>/lib/python2.7/site-packages/django/template/debug.py", line 79, in render_node
return node.render(context)
File "<path_to_virtualenv>/lib/python2.7/site-packages/django/template/defaulttags.py", line 217, in render
nodelist.append(node.render(context))
File "<path_to_virtualenv>/lib/python2.7/site-packages/django/template/defaulttags.py", line 329, in render
return nodelist.render(context)
File "<path_to_virtualenv>/lib/python2.7/site-packages/django/template/base.py", line 903, in render
bit = self.render_node(node, context)
File "<path_to_virtualenv>/lib/python2.7/site-packages/django/template/debug.py", line 79, in render_node
return node.render(context)
File "<path_to_virtualenv>/lib/python2.7/site-packages/django/template/base.py", line 1195, in render
return func(*resolved_args, **resolved_kwargs)
File "<path_to_virtualenv>/lib/python2.7/site-packages/rest_framework/templatetags/rest_framework.py", line 31, in render_field
return renderer.render_field(field, style)
File "<path_to_virtualenv>/lib/python2.7/site-packages/rest_framework/renderers.py", line 325, in render_field
field = field.as_form_field()
File "<path_to_virtualenv>/lib/python2.7/site-packages/rest_framework/utils/serializer_helpers.py", line 99, in as_form_field
for key, value in self.value.items():
AttributeError: 'NoneType' object has no attribute 'items'
|
AttributeError
|
def to_representation(self, value):
if not value:
return None
if self.format is None:
return value
# Applying a `DateField` to a datetime value is almost always
# not a sensible thing to do, as it means naively dropping
# any explicit or implicit timezone info.
assert not isinstance(value, datetime.datetime), (
"Expected a `date`, but got a `datetime`. Refusing to coerce, "
"as this may mean losing timezone information. Use a custom "
"read-only field and deal with timezone issues explicitly."
)
if self.format.lower() == ISO_8601:
if isinstance(value, str):
value = datetime.datetime.strptime(value, "%Y-%m-%d").date()
return value.isoformat()
return value.strftime(self.format)
|
def to_representation(self, value):
if self.format is None:
return value
# Applying a `DateField` to a datetime value is almost always
# not a sensible thing to do, as it means naively dropping
# any explicit or implicit timezone info.
assert not isinstance(value, datetime.datetime), (
"Expected a `date`, but got a `datetime`. Refusing to coerce, "
"as this may mean losing timezone information. Use a custom "
"read-only field and deal with timezone issues explicitly."
)
if self.format.lower() == ISO_8601:
return value.isoformat()
return value.strftime(self.format)
|
https://github.com/encode/django-rest-framework/issues/2687
|
Traceback (most recent call last):
File "tests.py", line 10, in test_post_root_view
response = self.view(request).render()
File "/path/to/django/views/decorators/csrf.py", line 57, in wrapped_view
return view_func(*args, **kwargs)
File "/path/to/rest_framework/viewsets.py", line 85, in view
return self.dispatch(request, *args, **kwargs)
File "/path/to/rest_framework/views.py", line 452, in dispatch
response = self.handle_exception(exc)
File "/path/to/rest_framework/views.py", line 449, in dispatch
response = handler(request, *args, **kwargs)
File "/path/to/rest_framework/mixins.py", line 57, in retrieve
return Response(serializer.data)
File "/path/to/rest_framework/serializers.py", line 467, in data
ret = super(Serializer, self).data
File "/path/to/rest_framework/serializers.py", line 213, in data
self._data = self.to_representation(self.instance)
File "/path/to/rest_framework/serializers.py", line 436, in to_representation
ret[field.field_name] = field.to_representation(attribute)
File "/path/to/rest_framework/fields.py", line 940, in to_representation
return value.isoformat()
AttributeError: 'str' object has no attribute 'isoformat'
|
AttributeError
|
def set_context(self, serializer_field):
self.user = serializer_field.context["request"].user
|
def set_context(self, serializer_field):
self.is_update = serializer_field.parent.instance is not None
|
https://github.com/encode/django-rest-framework/issues/1945
|
======================================================================
ERROR: test_that_when_serializing_a_user_with_a_modified_password_but_without_the_old_password_then_the_serializer_is_not_valid (users.tests.unit.test_user_serializer.UserSerializationTestCase)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/omer/Documents/Projects/startup/users/tests/unit/test_user_serializer.py", line 29, in test_that_when_serializing_a_user_with_a_modified_password_but_without_the_old_password_then_the_serializer_is_not_valid
sut.is_valid()
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/serializers.py", line 103, in is_valid
self._validated_data = self.run_validation(self._initial_data)
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/serializers.py", line 328, in run_validation
self.run_validators(value)
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/fields.py", line 275, in run_validators
validator(value)
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/validators.py", line 71, in __call__
(field_name, value[field_name]) for field_name in self.fields
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/validators.py", line 71, in <listcomp>
(field_name, value[field_name]) for field_name in self.fields
KeyError: 'slug'
|
KeyError
|
def __call__(self):
return self.user
|
def __call__(self):
if self.is_update:
raise SkipField()
if callable(self.default):
return self.default()
return self.default
|
https://github.com/encode/django-rest-framework/issues/1945
|
======================================================================
ERROR: test_that_when_serializing_a_user_with_a_modified_password_but_without_the_old_password_then_the_serializer_is_not_valid (users.tests.unit.test_user_serializer.UserSerializationTestCase)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/omer/Documents/Projects/startup/users/tests/unit/test_user_serializer.py", line 29, in test_that_when_serializing_a_user_with_a_modified_password_but_without_the_old_password_then_the_serializer_is_not_valid
sut.is_valid()
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/serializers.py", line 103, in is_valid
self._validated_data = self.run_validation(self._initial_data)
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/serializers.py", line 328, in run_validation
self.run_validators(value)
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/fields.py", line 275, in run_validators
validator(value)
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/validators.py", line 71, in __call__
(field_name, value[field_name]) for field_name in self.fields
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/validators.py", line 71, in <listcomp>
(field_name, value[field_name]) for field_name in self.fields
KeyError: 'slug'
|
KeyError
|
def set_context(self, serializer_field):
"""
This hook is called by the serializer instance,
prior to the validation call being made.
"""
# Determine the underlying model field name. This may not be the
# same as the serializer field name if `source=<>` is set.
self.field_name = serializer_field.source_attrs[0]
# Determine the existing instance, if this is an update operation.
self.instance = getattr(serializer_field.parent, "instance", None)
|
def set_context(self, serializer_field):
# Determine the underlying model field name. This may not be the
# same as the serializer field name if `source=<>` is set.
self.field_name = serializer_field.source_attrs[0]
# Determine the existing instance, if this is an update operation.
self.instance = getattr(serializer_field.parent, "instance", None)
|
https://github.com/encode/django-rest-framework/issues/1945
|
======================================================================
ERROR: test_that_when_serializing_a_user_with_a_modified_password_but_without_the_old_password_then_the_serializer_is_not_valid (users.tests.unit.test_user_serializer.UserSerializationTestCase)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/omer/Documents/Projects/startup/users/tests/unit/test_user_serializer.py", line 29, in test_that_when_serializing_a_user_with_a_modified_password_but_without_the_old_password_then_the_serializer_is_not_valid
sut.is_valid()
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/serializers.py", line 103, in is_valid
self._validated_data = self.run_validation(self._initial_data)
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/serializers.py", line 328, in run_validation
self.run_validators(value)
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/fields.py", line 275, in run_validators
validator(value)
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/validators.py", line 71, in __call__
(field_name, value[field_name]) for field_name in self.fields
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/validators.py", line 71, in <listcomp>
(field_name, value[field_name]) for field_name in self.fields
KeyError: 'slug'
|
KeyError
|
def set_context(self, serializer):
"""
This hook is called by the serializer instance,
prior to the validation call being made.
"""
# Determine the existing instance, if this is an update operation.
self.instance = getattr(serializer, "instance", None)
|
def set_context(self, serializer):
# Determine the existing instance, if this is an update operation.
self.instance = getattr(serializer, "instance", None)
|
https://github.com/encode/django-rest-framework/issues/1945
|
======================================================================
ERROR: test_that_when_serializing_a_user_with_a_modified_password_but_without_the_old_password_then_the_serializer_is_not_valid (users.tests.unit.test_user_serializer.UserSerializationTestCase)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/omer/Documents/Projects/startup/users/tests/unit/test_user_serializer.py", line 29, in test_that_when_serializing_a_user_with_a_modified_password_but_without_the_old_password_then_the_serializer_is_not_valid
sut.is_valid()
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/serializers.py", line 103, in is_valid
self._validated_data = self.run_validation(self._initial_data)
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/serializers.py", line 328, in run_validation
self.run_validators(value)
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/fields.py", line 275, in run_validators
validator(value)
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/validators.py", line 71, in __call__
(field_name, value[field_name]) for field_name in self.fields
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/validators.py", line 71, in <listcomp>
(field_name, value[field_name]) for field_name in self.fields
KeyError: 'slug'
|
KeyError
|
def __call__(self, attrs):
self.enforce_required_fields(attrs)
queryset = self.queryset
queryset = self.filter_queryset(attrs, queryset)
queryset = self.exclude_current_instance(attrs, queryset)
if queryset.exists():
field_names = ", ".join(self.fields)
raise ValidationError(self.message.format(field_names=field_names))
|
def __call__(self, attrs):
# Ensure uniqueness.
filter_kwargs = dict(
[(field_name, attrs[field_name]) for field_name in self.fields]
)
queryset = self.queryset.filter(**filter_kwargs)
if self.instance is not None:
queryset = queryset.exclude(pk=self.instance.pk)
if queryset.exists():
field_names = ", ".join(self.fields)
raise ValidationError(self.message.format(field_names=field_names))
|
https://github.com/encode/django-rest-framework/issues/1945
|
======================================================================
ERROR: test_that_when_serializing_a_user_with_a_modified_password_but_without_the_old_password_then_the_serializer_is_not_valid (users.tests.unit.test_user_serializer.UserSerializationTestCase)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/omer/Documents/Projects/startup/users/tests/unit/test_user_serializer.py", line 29, in test_that_when_serializing_a_user_with_a_modified_password_but_without_the_old_password_then_the_serializer_is_not_valid
sut.is_valid()
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/serializers.py", line 103, in is_valid
self._validated_data = self.run_validation(self._initial_data)
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/serializers.py", line 328, in run_validation
self.run_validators(value)
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/fields.py", line 275, in run_validators
validator(value)
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/validators.py", line 71, in __call__
(field_name, value[field_name]) for field_name in self.fields
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/validators.py", line 71, in <listcomp>
(field_name, value[field_name]) for field_name in self.fields
KeyError: 'slug'
|
KeyError
|
def set_context(self, serializer):
"""
This hook is called by the serializer instance,
prior to the validation call being made.
"""
# Determine the underlying model field names. These may not be the
# same as the serializer field names if `source=<>` is set.
self.field_name = serializer.fields[self.field].source_attrs[0]
self.date_field_name = serializer.fields[self.date_field].source_attrs[0]
# Determine the existing instance, if this is an update operation.
self.instance = getattr(serializer, "instance", None)
|
def set_context(self, serializer):
# Determine the underlying model field names. These may not be the
# same as the serializer field names if `source=<>` is set.
self.field_name = serializer.fields[self.field].source_attrs[0]
self.date_field_name = serializer.fields[self.date_field].source_attrs[0]
# Determine the existing instance, if this is an update operation.
self.instance = getattr(serializer, "instance", None)
|
https://github.com/encode/django-rest-framework/issues/1945
|
======================================================================
ERROR: test_that_when_serializing_a_user_with_a_modified_password_but_without_the_old_password_then_the_serializer_is_not_valid (users.tests.unit.test_user_serializer.UserSerializationTestCase)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/omer/Documents/Projects/startup/users/tests/unit/test_user_serializer.py", line 29, in test_that_when_serializing_a_user_with_a_modified_password_but_without_the_old_password_then_the_serializer_is_not_valid
sut.is_valid()
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/serializers.py", line 103, in is_valid
self._validated_data = self.run_validation(self._initial_data)
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/serializers.py", line 328, in run_validation
self.run_validators(value)
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/fields.py", line 275, in run_validators
validator(value)
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/validators.py", line 71, in __call__
(field_name, value[field_name]) for field_name in self.fields
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/validators.py", line 71, in <listcomp>
(field_name, value[field_name]) for field_name in self.fields
KeyError: 'slug'
|
KeyError
|
def __call__(self, attrs):
self.enforce_required_fields(attrs)
queryset = self.queryset
queryset = self.filter_queryset(attrs, queryset)
queryset = self.exclude_current_instance(attrs, queryset)
if queryset.exists():
message = self.message.format(date_field=self.date_field)
raise ValidationError({self.field: message})
|
def __call__(self, attrs):
filter_kwargs = self.get_filter_kwargs(attrs)
queryset = self.queryset.filter(**filter_kwargs)
if self.instance is not None:
queryset = queryset.exclude(pk=self.instance.pk)
if queryset.exists():
message = self.message.format(date_field=self.date_field)
raise ValidationError({self.field: message})
|
https://github.com/encode/django-rest-framework/issues/1945
|
======================================================================
ERROR: test_that_when_serializing_a_user_with_a_modified_password_but_without_the_old_password_then_the_serializer_is_not_valid (users.tests.unit.test_user_serializer.UserSerializationTestCase)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/omer/Documents/Projects/startup/users/tests/unit/test_user_serializer.py", line 29, in test_that_when_serializing_a_user_with_a_modified_password_but_without_the_old_password_then_the_serializer_is_not_valid
sut.is_valid()
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/serializers.py", line 103, in is_valid
self._validated_data = self.run_validation(self._initial_data)
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/serializers.py", line 328, in run_validation
self.run_validators(value)
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/fields.py", line 275, in run_validators
validator(value)
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/validators.py", line 71, in __call__
(field_name, value[field_name]) for field_name in self.fields
File "/home/omer/.virtualenvs/startup/lib/python3.4/site-packages/rest_framework/validators.py", line 71, in <listcomp>
(field_name, value[field_name]) for field_name in self.fields
KeyError: 'slug'
|
KeyError
|
def _write_mseed(
stream,
filename,
encoding=None,
reclen=None,
byteorder=None,
sequence_number=None,
flush=True,
verbose=0,
**_kwargs,
):
"""
Write Mini-SEED file from a Stream object.
.. warning::
This function should NOT be called directly, it registers via the
the :meth:`~obspy.core.stream.Stream.write` method of an
ObsPy :class:`~obspy.core.stream.Stream` object, call this instead.
:type stream: :class:`~obspy.core.stream.Stream`
:param stream: A Stream object.
:type filename: str
:param filename: Name of the output file or a file-like object.
:type encoding: int or str, optional
:param encoding: Should be set to one of the following supported Mini-SEED
data encoding formats: ``ASCII`` (``0``)*, ``INT16`` (``1``),
``INT32`` (``3``), ``FLOAT32`` (``4``)*, ``FLOAT64`` (``5``)*,
``STEIM1`` (``10``) and ``STEIM2`` (``11``)*. If no encoding is given
it will be derived from the dtype of the data and the appropriate
default encoding (depicted with an asterix) will be chosen.
:type reclen: int, optional
:param reclen: Should be set to the desired data record length in bytes
which must be expressible as 2 raised to the power of X where X is
between (and including) 8 to 20.
Defaults to 4096
:type byteorder: int or str, optional
:param byteorder: Must be either ``0`` or ``'<'`` for LSBF or
little-endian, ``1`` or ``'>'`` for MBF or big-endian. ``'='`` is the
native byte order. If ``-1`` it will be passed directly to libmseed
which will also default it to big endian. Defaults to big endian.
:type sequence_number: int, optional
:param sequence_number: Must be an integer ranging between 1 and 999999.
Represents the sequence count of the first record of each Trace.
Defaults to 1.
:type flush: bool, optional
:param flush: If ``True``, all data will be packed into records. If
``False`` new records will only be created when there is enough data to
completely fill a record. Be careful with this. If in doubt, choose
``True`` which is also the default value.
:type verbose: int, optional
:param verbose: Controls verbosity, a value of ``0`` will result in no
diagnostic output.
.. note::
The ``reclen``, ``encoding``, ``byteorder`` and ``sequence_count``
keyword arguments can be set in the ``stats.mseed`` of
each :class:`~obspy.core.trace.Trace` as well as ``kwargs`` of this
function. If both are given the ``kwargs`` will be used.
The ``stats.mseed.blkt1001.timing_quality`` value will also be written
if it is set.
The ``stats.mseed.blkt1001.timing_quality`` value will also be written
if it is set.
.. rubric:: Example
>>> from obspy import read
>>> st = read()
>>> st.write('filename.mseed', format='MSEED') # doctest: +SKIP
"""
# Map flush and verbose flags.
if flush:
flush = 1
else:
flush = 0
if not verbose:
verbose = 0
if verbose is True:
verbose = 1
# Some sanity checks for the keyword arguments.
if reclen is not None and reclen not in VALID_RECORD_LENGTHS:
msg = (
"Invalid record length. The record length must be a value\n"
+ "of 2 to the power of X where 8 <= X <= 20."
)
raise ValueError(msg)
if byteorder is not None and byteorder not in [0, 1, -1]:
if byteorder == "=":
byteorder = NATIVE_BYTEORDER
# If not elif because NATIVE_BYTEORDER is '<' or '>'.
if byteorder == "<":
byteorder = 0
elif byteorder == ">":
byteorder = 1
else:
msg = "Invalid byte order. It must be either '<', '>', '=', " + "0, 1 or -1"
raise ValueError(msg)
if encoding is not None:
encoding = util._convert_and_check_encoding_for_writing(encoding)
if sequence_number is not None:
# Check sequence number type
try:
sequence_number = int(sequence_number)
# Check sequence number value
if sequence_number < 1 or sequence_number > 999999:
raise ValueError(
"Sequence number out of range. It must be "
+ " between 1 and 999999."
)
except (TypeError, ValueError):
msg = (
"Invalid sequence number. It must be an integer ranging "
+ "from 1 to 999999."
)
raise ValueError(msg)
trace_attributes = []
use_blkt_1001 = False
# The data might need to be modified. To not modify the input data keep
# references of which data to finally write.
trace_data = []
# Loop over every trace and figure out the correct settings.
for _i, trace in enumerate(stream):
# Create temporary dict for storing information while writing.
trace_attr = {}
trace_attributes.append(trace_attr)
# Figure out whether or not to use Blockette 1001. This check is done
# once to ensure that Blockette 1001 is either written for every record
# in the file or for none. It checks the starttime, the sampling rate
# and the timing quality. If starttime or sampling rate has a precision
# of more than 100 microseconds, or if timing quality is set, \
# Blockette 1001 will be written for every record.
starttime = util._convert_datetime_to_mstime(trace.stats.starttime)
if starttime % 100 != 0 or (
trace.stats.sampling_rate
and (1.0 / trace.stats.sampling_rate * HPTMODULUS) % 100 != 0
):
use_blkt_1001 = True
if (
hasattr(trace.stats, "mseed")
and hasattr(trace.stats["mseed"], "blkt1001")
and hasattr(trace.stats["mseed"]["blkt1001"], "timing_quality")
):
timing_quality = trace.stats["mseed"]["blkt1001"]["timing_quality"]
# Check timing quality type
try:
timing_quality = int(timing_quality)
if timing_quality < 0 or timing_quality > 100:
raise ValueError(
"Timing quality out of range. It must be between 0 and 100."
)
except ValueError:
msg = (
"Invalid timing quality in Stream[%i].stats." % _i
+ "mseed.timing_quality. It must be an integer ranging"
+ " from 0 to 100"
)
raise ValueError(msg)
trace_attr["timing_quality"] = timing_quality
use_blkt_1001 = True
else:
trace_attr["timing_quality"] = timing_quality = 0
if sequence_number is not None:
trace_attr["sequence_number"] = sequence_number
elif hasattr(trace.stats, "mseed") and hasattr(
trace.stats["mseed"], "sequence_number"
):
sequence_number = trace.stats["mseed"]["sequence_number"]
# Check sequence number type
try:
sequence_number = int(sequence_number)
# Check sequence number value
if sequence_number < 1 or sequence_number > 999999:
raise ValueError(
"Sequence number out of range in "
+ "Stream[%i].stats. It must be between "
+ "1 and 999999."
)
except (TypeError, ValueError):
msg = (
"Invalid sequence number in Stream[%i].stats." % _i
+ "mseed.sequence_number. It must be an integer ranging"
+ " from 1 to 999999."
)
raise ValueError(msg)
trace_attr["sequence_number"] = sequence_number
else:
trace_attr["sequence_number"] = sequence_number = 1
# Set data quality to indeterminate (= D) if it is not already set.
try:
trace_attr["dataquality"] = trace.stats["mseed"]["dataquality"].upper()
except Exception:
trace_attr["dataquality"] = "D"
# Sanity check for the dataquality to get a nice Python exception
# instead of a C error.
if trace_attr["dataquality"] not in ["D", "R", "Q", "M"]:
msg = (
"Invalid dataquality in Stream[%i].stats" % _i
+ ".mseed.dataquality\n"
+ "The dataquality for Mini-SEED must be either D, R, Q "
+ "or M. See the SEED manual for further information."
)
raise ValueError(msg)
# Check that data is of the right type.
if not isinstance(trace.data, np.ndarray):
msg = (
"Unsupported data type %s" % type(trace.data)
+ " for Stream[%i].data." % _i
)
raise ValueError(msg)
# Check if ndarray is contiguous (see #192, #193)
if not trace.data.flags.c_contiguous:
msg = (
"Detected non contiguous data array in Stream[%i]" % _i
+ ".data. Trying to fix array."
)
warnings.warn(msg)
trace.data = np.ascontiguousarray(trace.data)
# Handle the record length.
if reclen is not None:
trace_attr["reclen"] = reclen
elif hasattr(trace.stats, "mseed") and hasattr(
trace.stats.mseed, "record_length"
):
if trace.stats.mseed.record_length in VALID_RECORD_LENGTHS:
trace_attr["reclen"] = trace.stats.mseed.record_length
else:
msg = (
"Invalid record length in Stream[%i].stats." % _i
+ "mseed.reclen.\nThe record length must be a value "
+ "of 2 to the power of X where 8 <= X <= 20."
)
raise ValueError(msg)
else:
trace_attr["reclen"] = 4096
# Handle the byte order.
if byteorder is not None:
trace_attr["byteorder"] = byteorder
elif hasattr(trace.stats, "mseed") and hasattr(trace.stats.mseed, "byteorder"):
if trace.stats.mseed.byteorder in [0, 1, -1]:
trace_attr["byteorder"] = trace.stats.mseed.byteorder
elif trace.stats.mseed.byteorder == "=":
if NATIVE_BYTEORDER == "<":
trace_attr["byteorder"] = 0
else:
trace_attr["byteorder"] = 1
elif trace.stats.mseed.byteorder == "<":
trace_attr["byteorder"] = 0
elif trace.stats.mseed.byteorder == ">":
trace_attr["byteorder"] = 1
else:
msg = (
"Invalid byteorder in Stream[%i].stats." % _i
+ "mseed.byteorder. It must be either '<', '>', '=',"
+ " 0, 1 or -1"
)
raise ValueError(msg)
else:
trace_attr["byteorder"] = 1
if trace_attr["byteorder"] == -1:
if NATIVE_BYTEORDER == "<":
trace_attr["byteorder"] = 0
else:
trace_attr["byteorder"] = 1
# Handle the encoding.
trace_attr["encoding"] = None
# If encoding arrives here it is already guaranteed to be a valid
# integer encoding.
if encoding is not None:
# Check if the dtype for all traces is compatible with the enforced
# encoding.
ident, _, dtype, _ = ENCODINGS[encoding]
if trace.data.dtype.type != dtype:
msg = """
Wrong dtype for Stream[%i].data for encoding %s.
Please change the dtype of your data or use an appropriate
encoding. See the obspy.io.mseed documentation for more
information.
""" % (_i, ident)
raise Exception(msg)
trace_attr["encoding"] = encoding
elif hasattr(trace.stats, "mseed") and hasattr(trace.stats.mseed, "encoding"):
trace_attr["encoding"] = util._convert_and_check_encoding_for_writing(
trace.stats.mseed.encoding
)
# Check if the encoding matches the data's dtype.
if trace.data.dtype.type != ENCODINGS[trace_attr["encoding"]][2]:
msg = (
"The encoding specified in "
+ "trace.stats.mseed.encoding does not match the "
+ "dtype of the data.\nA suitable encoding will "
+ "be chosen."
)
warnings.warn(msg, UserWarning)
trace_attr["encoding"] = None
# automatically detect encoding if no encoding is given.
if trace_attr["encoding"] is None:
if trace.data.dtype.type == np.int32:
trace_attr["encoding"] = 11
elif trace.data.dtype.type == np.float32:
trace_attr["encoding"] = 4
elif trace.data.dtype.type == np.float64:
trace_attr["encoding"] = 5
elif trace.data.dtype.type == np.int16:
trace_attr["encoding"] = 1
elif trace.data.dtype.type == np.dtype(native_str("|S1")).type:
trace_attr["encoding"] = 0
# int64 data not supported; if possible downcast to int32, else
# create error message. After bumping up to numpy 1.9.0 this check
# can be replaced by numpy.can_cast()
elif trace.data.dtype.type == np.int64:
# check if data can be safely downcast to int32
ii32 = np.iinfo(np.int32)
if abs(trace.max()) <= ii32.max:
trace_data.append(_np_copy_astype(trace.data, np.int32))
trace_attr["encoding"] = 11
else:
msg = (
"int64 data only supported when writing MSEED if "
"it can be downcast to int32 type data."
)
raise ObsPyMSEEDError(msg)
else:
msg = "Unsupported data type %s in Stream[%i].data" % (
trace.data.dtype,
_i,
)
raise Exception(msg)
# Convert data if necessary, otherwise store references in list.
if trace_attr["encoding"] == 1:
# INT16 needs INT32 data type
trace_data.append(_np_copy_astype(trace.data, np.int32))
else:
trace_data.append(trace.data)
# Do some final sanity checks and raise a warning if a file will be written
# with more than one different encoding, record length or byte order.
encodings = {_i["encoding"] for _i in trace_attributes}
reclens = {_i["reclen"] for _i in trace_attributes}
byteorders = {_i["byteorder"] for _i in trace_attributes}
msg = (
"File will be written with more than one different %s.\n"
+ "This might have a negative influence on the compatibility "
+ "with other programs."
)
if len(encodings) != 1:
warnings.warn(msg % "encodings")
if len(reclens) != 1:
warnings.warn(msg % "record lengths")
if len(byteorders) != 1:
warnings.warn(msg % "byteorders")
# Open filehandler or use an existing file like object.
if not hasattr(filename, "write"):
f = open(filename, "wb")
else:
f = filename
# Loop over every trace and finally write it to the filehandler.
for trace, data, trace_attr in zip(stream, trace_data, trace_attributes):
if not len(data):
msg = 'Skipping empty trace "%s".' % (trace)
warnings.warn(msg)
continue
# Create C struct MSTrace.
mst = MST(trace, data, dataquality=trace_attr["dataquality"])
# Initialize packedsamples pointer for the mst_pack function
packedsamples = C.c_int()
# Callback function for mst_pack to actually write the file
def record_handler(record, reclen, _stream):
f.write(record[0:reclen])
# Define Python callback function for use in C function
rec_handler = C.CFUNCTYPE(C.c_void_p, C.POINTER(C.c_char), C.c_int, C.c_void_p)(
record_handler
)
# Fill up msr record structure, this is already contained in
# mstg, however if blk1001 is set we need it anyway
msr = clibmseed.msr_init(None)
msr.contents.network = trace.stats.network.encode("ascii", "strict")
msr.contents.station = trace.stats.station.encode("ascii", "strict")
msr.contents.location = trace.stats.location.encode("ascii", "strict")
msr.contents.channel = trace.stats.channel.encode("ascii", "strict")
msr.contents.dataquality = trace_attr["dataquality"].encode("ascii", "strict")
# Set starting sequence number
msr.contents.sequence_number = trace_attr["sequence_number"]
# Only use Blockette 1001 if necessary.
if use_blkt_1001:
# Timing quality has been set in trace_attr
size = C.sizeof(Blkt1001S)
# Only timing quality matters here, other blockette attributes will
# be filled by libmseed.msr_normalize_header
blkt_value = pack(native_str("BBBB"), trace_attr["timing_quality"], 0, 0, 0)
blkt_ptr = C.create_string_buffer(blkt_value, len(blkt_value))
# Usually returns a pointer to the added blockette in the
# blockette link chain and a NULL pointer if it fails.
# NULL pointers have a false boolean value according to the
# ctypes manual.
ret_val = clibmseed.msr_addblockette(msr, blkt_ptr, size, 1001, 0)
if bool(ret_val) is False:
clibmseed.msr_free(C.pointer(msr))
del msr
raise Exception("Error in msr_addblockette")
# Only use Blockette 100 if necessary.
# Determine if a blockette 100 will be needed to represent the input
# sample rate or if the sample rate in the fixed section of the data
# header will suffice (see ms_genfactmult in libmseed/genutils.c)
use_blkt_100 = False
_factor = C.c_int16()
_multiplier = C.c_int16()
_retval = clibmseed.ms_genfactmult(
trace.stats.sampling_rate, C.pointer(_factor), C.pointer(_multiplier)
)
# Use blockette 100 if ms_genfactmult() failed.
if _retval != 0:
use_blkt_100 = True
# Otherwise figure out if ms_genfactmult() found exact factors.
# Otherwise write blockette 100.
else:
ms_sr = clibmseed.ms_nomsamprate(_factor.value, _multiplier.value)
# It is also necessary if the libmseed calculated sampling rate
# would result in a loss of accuracy - the floating point
# comparision is on purpose here as it will always try to
# preserve all accuracy.
# Cast to float32 to not add blockette 100 for values
# that cannot be represented with 32bits.
if np.float32(ms_sr) != np.float32(trace.stats.sampling_rate):
use_blkt_100 = True
if use_blkt_100:
size = C.sizeof(Blkt100S)
blkt100 = C.c_char(b" ")
C.memset(C.pointer(blkt100), 0, size)
ret_val = clibmseed.msr_addblockette(msr, C.pointer(blkt100), size, 100, 0) # NOQA
# Usually returns a pointer to the added blockette in the
# blockette link chain and a NULL pointer if it fails.
# NULL pointers have a false boolean value according to the
# ctypes manual.
if bool(ret_val) is False:
clibmseed.msr_free(C.pointer(msr)) # NOQA
del msr # NOQA
raise Exception("Error in msr_addblockette")
# Pack mstg into a MSEED file using the callback record_handler as
# write method.
errcode = clibmseed.mst_pack(
mst.mst,
rec_handler,
None,
trace_attr["reclen"],
trace_attr["encoding"],
trace_attr["byteorder"],
C.byref(packedsamples),
flush,
verbose,
msr,
) # NOQA
if errcode == 0:
msg = (
"Did not write any data for trace '%s' even though it "
"contains data values."
) % trace
raise ValueError(msg)
if errcode == -1:
clibmseed.msr_free(C.pointer(msr)) # NOQA
del mst, msr # NOQA
raise Exception("Error in mst_pack")
# Deallocate any allocated memory.
clibmseed.msr_free(C.pointer(msr)) # NOQA
del mst, msr # NOQA
# Close if its a file handler.
if not hasattr(filename, "write"):
f.close()
|
def _write_mseed(
stream,
filename,
encoding=None,
reclen=None,
byteorder=None,
sequence_number=None,
flush=True,
verbose=0,
**_kwargs,
):
"""
Write Mini-SEED file from a Stream object.
.. warning::
This function should NOT be called directly, it registers via the
the :meth:`~obspy.core.stream.Stream.write` method of an
ObsPy :class:`~obspy.core.stream.Stream` object, call this instead.
:type stream: :class:`~obspy.core.stream.Stream`
:param stream: A Stream object.
:type filename: str
:param filename: Name of the output file or a file-like object.
:type encoding: int or str, optional
:param encoding: Should be set to one of the following supported Mini-SEED
data encoding formats: ``ASCII`` (``0``)*, ``INT16`` (``1``),
``INT32`` (``3``), ``FLOAT32`` (``4``)*, ``FLOAT64`` (``5``)*,
``STEIM1`` (``10``) and ``STEIM2`` (``11``)*. If no encoding is given
it will be derived from the dtype of the data and the appropriate
default encoding (depicted with an asterix) will be chosen.
:type reclen: int, optional
:param reclen: Should be set to the desired data record length in bytes
which must be expressible as 2 raised to the power of X where X is
between (and including) 8 to 20.
Defaults to 4096
:type byteorder: int or str, optional
:param byteorder: Must be either ``0`` or ``'<'`` for LSBF or
little-endian, ``1`` or ``'>'`` for MBF or big-endian. ``'='`` is the
native byte order. If ``-1`` it will be passed directly to libmseed
which will also default it to big endian. Defaults to big endian.
:type sequence_number: int, optional
:param sequence_number: Must be an integer ranging between 1 and 999999.
Represents the sequence count of the first record of each Trace.
Defaults to 1.
:type flush: bool, optional
:param flush: If ``True``, all data will be packed into records. If
``False`` new records will only be created when there is enough data to
completely fill a record. Be careful with this. If in doubt, choose
``True`` which is also the default value.
:type verbose: int, optional
:param verbose: Controls verbosity, a value of ``0`` will result in no
diagnostic output.
.. note::
The ``reclen``, ``encoding``, ``byteorder`` and ``sequence_count``
keyword arguments can be set in the ``stats.mseed`` of
each :class:`~obspy.core.trace.Trace` as well as ``kwargs`` of this
function. If both are given the ``kwargs`` will be used.
The ``stats.mseed.blkt1001.timing_quality`` value will also be written
if it is set.
The ``stats.mseed.blkt1001.timing_quality`` value will also be written
if it is set.
.. rubric:: Example
>>> from obspy import read
>>> st = read()
>>> st.write('filename.mseed', format='MSEED') # doctest: +SKIP
"""
# Map flush and verbose flags.
if flush:
flush = 1
else:
flush = 0
if not verbose:
verbose = 0
if verbose is True:
verbose = 1
# Some sanity checks for the keyword arguments.
if reclen is not None and reclen not in VALID_RECORD_LENGTHS:
msg = (
"Invalid record length. The record length must be a value\n"
+ "of 2 to the power of X where 8 <= X <= 20."
)
raise ValueError(msg)
if byteorder is not None and byteorder not in [0, 1, -1]:
if byteorder == "=":
byteorder = NATIVE_BYTEORDER
# If not elif because NATIVE_BYTEORDER is '<' or '>'.
if byteorder == "<":
byteorder = 0
elif byteorder == ">":
byteorder = 1
else:
msg = "Invalid byte order. It must be either '<', '>', '=', " + "0, 1 or -1"
raise ValueError(msg)
if encoding is not None:
encoding = util._convert_and_check_encoding_for_writing(encoding)
if sequence_number is not None:
# Check sequence number type
try:
sequence_number = int(sequence_number)
# Check sequence number value
if sequence_number < 1 or sequence_number > 999999:
raise ValueError(
"Sequence number out of range. It must be "
+ " between 1 and 999999."
)
except (TypeError, ValueError):
msg = (
"Invalid sequence number. It must be an integer ranging "
+ "from 1 to 999999."
)
raise ValueError(msg)
trace_attributes = []
use_blkt_1001 = False
# The data might need to be modified. To not modify the input data keep
# references of which data to finally write.
trace_data = []
# Loop over every trace and figure out the correct settings.
for _i, trace in enumerate(stream):
# Create temporary dict for storing information while writing.
trace_attr = {}
trace_attributes.append(trace_attr)
# Figure out whether or not to use Blockette 1001. This check is done
# once to ensure that Blockette 1001 is either written for every record
# in the file or for none. It checks the starttime, the sampling rate
# and the timing quality. If starttime or sampling rate has a precision
# of more than 100 microseconds, or if timing quality is set, \
# Blockette 1001 will be written for every record.
starttime = util._convert_datetime_to_mstime(trace.stats.starttime)
if (
starttime % 100 != 0
or (1.0 / trace.stats.sampling_rate * HPTMODULUS) % 100 != 0
):
use_blkt_1001 = True
if (
hasattr(trace.stats, "mseed")
and hasattr(trace.stats["mseed"], "blkt1001")
and hasattr(trace.stats["mseed"]["blkt1001"], "timing_quality")
):
timing_quality = trace.stats["mseed"]["blkt1001"]["timing_quality"]
# Check timing quality type
try:
timing_quality = int(timing_quality)
if timing_quality < 0 or timing_quality > 100:
raise ValueError(
"Timing quality out of range. It must be between 0 and 100."
)
except ValueError:
msg = (
"Invalid timing quality in Stream[%i].stats." % _i
+ "mseed.timing_quality. It must be an integer ranging"
+ " from 0 to 100"
)
raise ValueError(msg)
trace_attr["timing_quality"] = timing_quality
use_blkt_1001 = True
else:
trace_attr["timing_quality"] = timing_quality = 0
if sequence_number is not None:
trace_attr["sequence_number"] = sequence_number
elif hasattr(trace.stats, "mseed") and hasattr(
trace.stats["mseed"], "sequence_number"
):
sequence_number = trace.stats["mseed"]["sequence_number"]
# Check sequence number type
try:
sequence_number = int(sequence_number)
# Check sequence number value
if sequence_number < 1 or sequence_number > 999999:
raise ValueError(
"Sequence number out of range in "
+ "Stream[%i].stats. It must be between "
+ "1 and 999999."
)
except (TypeError, ValueError):
msg = (
"Invalid sequence number in Stream[%i].stats." % _i
+ "mseed.sequence_number. It must be an integer ranging"
+ " from 1 to 999999."
)
raise ValueError(msg)
trace_attr["sequence_number"] = sequence_number
else:
trace_attr["sequence_number"] = sequence_number = 1
# Set data quality to indeterminate (= D) if it is not already set.
try:
trace_attr["dataquality"] = trace.stats["mseed"]["dataquality"].upper()
except Exception:
trace_attr["dataquality"] = "D"
# Sanity check for the dataquality to get a nice Python exception
# instead of a C error.
if trace_attr["dataquality"] not in ["D", "R", "Q", "M"]:
msg = (
"Invalid dataquality in Stream[%i].stats" % _i
+ ".mseed.dataquality\n"
+ "The dataquality for Mini-SEED must be either D, R, Q "
+ "or M. See the SEED manual for further information."
)
raise ValueError(msg)
# Check that data is of the right type.
if not isinstance(trace.data, np.ndarray):
msg = (
"Unsupported data type %s" % type(trace.data)
+ " for Stream[%i].data." % _i
)
raise ValueError(msg)
# Check if ndarray is contiguous (see #192, #193)
if not trace.data.flags.c_contiguous:
msg = (
"Detected non contiguous data array in Stream[%i]" % _i
+ ".data. Trying to fix array."
)
warnings.warn(msg)
trace.data = np.ascontiguousarray(trace.data)
# Handle the record length.
if reclen is not None:
trace_attr["reclen"] = reclen
elif hasattr(trace.stats, "mseed") and hasattr(
trace.stats.mseed, "record_length"
):
if trace.stats.mseed.record_length in VALID_RECORD_LENGTHS:
trace_attr["reclen"] = trace.stats.mseed.record_length
else:
msg = (
"Invalid record length in Stream[%i].stats." % _i
+ "mseed.reclen.\nThe record length must be a value "
+ "of 2 to the power of X where 8 <= X <= 20."
)
raise ValueError(msg)
else:
trace_attr["reclen"] = 4096
# Handle the byte order.
if byteorder is not None:
trace_attr["byteorder"] = byteorder
elif hasattr(trace.stats, "mseed") and hasattr(trace.stats.mseed, "byteorder"):
if trace.stats.mseed.byteorder in [0, 1, -1]:
trace_attr["byteorder"] = trace.stats.mseed.byteorder
elif trace.stats.mseed.byteorder == "=":
if NATIVE_BYTEORDER == "<":
trace_attr["byteorder"] = 0
else:
trace_attr["byteorder"] = 1
elif trace.stats.mseed.byteorder == "<":
trace_attr["byteorder"] = 0
elif trace.stats.mseed.byteorder == ">":
trace_attr["byteorder"] = 1
else:
msg = (
"Invalid byteorder in Stream[%i].stats." % _i
+ "mseed.byteorder. It must be either '<', '>', '=',"
+ " 0, 1 or -1"
)
raise ValueError(msg)
else:
trace_attr["byteorder"] = 1
if trace_attr["byteorder"] == -1:
if NATIVE_BYTEORDER == "<":
trace_attr["byteorder"] = 0
else:
trace_attr["byteorder"] = 1
# Handle the encoding.
trace_attr["encoding"] = None
# If encoding arrives here it is already guaranteed to be a valid
# integer encoding.
if encoding is not None:
# Check if the dtype for all traces is compatible with the enforced
# encoding.
ident, _, dtype, _ = ENCODINGS[encoding]
if trace.data.dtype.type != dtype:
msg = """
Wrong dtype for Stream[%i].data for encoding %s.
Please change the dtype of your data or use an appropriate
encoding. See the obspy.io.mseed documentation for more
information.
""" % (_i, ident)
raise Exception(msg)
trace_attr["encoding"] = encoding
elif hasattr(trace.stats, "mseed") and hasattr(trace.stats.mseed, "encoding"):
trace_attr["encoding"] = util._convert_and_check_encoding_for_writing(
trace.stats.mseed.encoding
)
# Check if the encoding matches the data's dtype.
if trace.data.dtype.type != ENCODINGS[trace_attr["encoding"]][2]:
msg = (
"The encoding specified in "
+ "trace.stats.mseed.encoding does not match the "
+ "dtype of the data.\nA suitable encoding will "
+ "be chosen."
)
warnings.warn(msg, UserWarning)
trace_attr["encoding"] = None
# automatically detect encoding if no encoding is given.
if trace_attr["encoding"] is None:
if trace.data.dtype.type == np.int32:
trace_attr["encoding"] = 11
elif trace.data.dtype.type == np.float32:
trace_attr["encoding"] = 4
elif trace.data.dtype.type == np.float64:
trace_attr["encoding"] = 5
elif trace.data.dtype.type == np.int16:
trace_attr["encoding"] = 1
elif trace.data.dtype.type == np.dtype(native_str("|S1")).type:
trace_attr["encoding"] = 0
# int64 data not supported; if possible downcast to int32, else
# create error message. After bumping up to numpy 1.9.0 this check
# can be replaced by numpy.can_cast()
elif trace.data.dtype.type == np.int64:
# check if data can be safely downcast to int32
ii32 = np.iinfo(np.int32)
if abs(trace.max()) <= ii32.max:
trace_data.append(_np_copy_astype(trace.data, np.int32))
trace_attr["encoding"] = 11
else:
msg = (
"int64 data only supported when writing MSEED if "
"it can be downcast to int32 type data."
)
raise ObsPyMSEEDError(msg)
else:
msg = "Unsupported data type %s in Stream[%i].data" % (
trace.data.dtype,
_i,
)
raise Exception(msg)
# Convert data if necessary, otherwise store references in list.
if trace_attr["encoding"] == 1:
# INT16 needs INT32 data type
trace_data.append(_np_copy_astype(trace.data, np.int32))
else:
trace_data.append(trace.data)
# Do some final sanity checks and raise a warning if a file will be written
# with more than one different encoding, record length or byte order.
encodings = {_i["encoding"] for _i in trace_attributes}
reclens = {_i["reclen"] for _i in trace_attributes}
byteorders = {_i["byteorder"] for _i in trace_attributes}
msg = (
"File will be written with more than one different %s.\n"
+ "This might have a negative influence on the compatibility "
+ "with other programs."
)
if len(encodings) != 1:
warnings.warn(msg % "encodings")
if len(reclens) != 1:
warnings.warn(msg % "record lengths")
if len(byteorders) != 1:
warnings.warn(msg % "byteorders")
# Open filehandler or use an existing file like object.
if not hasattr(filename, "write"):
f = open(filename, "wb")
else:
f = filename
# Loop over every trace and finally write it to the filehandler.
for trace, data, trace_attr in zip(stream, trace_data, trace_attributes):
if not len(data):
msg = 'Skipping empty trace "%s".' % (trace)
warnings.warn(msg)
continue
# Create C struct MSTrace.
mst = MST(trace, data, dataquality=trace_attr["dataquality"])
# Initialize packedsamples pointer for the mst_pack function
packedsamples = C.c_int()
# Callback function for mst_pack to actually write the file
def record_handler(record, reclen, _stream):
f.write(record[0:reclen])
# Define Python callback function for use in C function
rec_handler = C.CFUNCTYPE(C.c_void_p, C.POINTER(C.c_char), C.c_int, C.c_void_p)(
record_handler
)
# Fill up msr record structure, this is already contained in
# mstg, however if blk1001 is set we need it anyway
msr = clibmseed.msr_init(None)
msr.contents.network = trace.stats.network.encode("ascii", "strict")
msr.contents.station = trace.stats.station.encode("ascii", "strict")
msr.contents.location = trace.stats.location.encode("ascii", "strict")
msr.contents.channel = trace.stats.channel.encode("ascii", "strict")
msr.contents.dataquality = trace_attr["dataquality"].encode("ascii", "strict")
# Set starting sequence number
msr.contents.sequence_number = trace_attr["sequence_number"]
# Only use Blockette 1001 if necessary.
if use_blkt_1001:
# Timing quality has been set in trace_attr
size = C.sizeof(Blkt1001S)
# Only timing quality matters here, other blockette attributes will
# be filled by libmseed.msr_normalize_header
blkt_value = pack(native_str("BBBB"), trace_attr["timing_quality"], 0, 0, 0)
blkt_ptr = C.create_string_buffer(blkt_value, len(blkt_value))
# Usually returns a pointer to the added blockette in the
# blockette link chain and a NULL pointer if it fails.
# NULL pointers have a false boolean value according to the
# ctypes manual.
ret_val = clibmseed.msr_addblockette(msr, blkt_ptr, size, 1001, 0)
if bool(ret_val) is False:
clibmseed.msr_free(C.pointer(msr))
del msr
raise Exception("Error in msr_addblockette")
# Only use Blockette 100 if necessary.
# Determine if a blockette 100 will be needed to represent the input
# sample rate or if the sample rate in the fixed section of the data
# header will suffice (see ms_genfactmult in libmseed/genutils.c)
use_blkt_100 = False
_factor = C.c_int16()
_multiplier = C.c_int16()
_retval = clibmseed.ms_genfactmult(
trace.stats.sampling_rate, C.pointer(_factor), C.pointer(_multiplier)
)
# Use blockette 100 if ms_genfactmult() failed.
if _retval != 0:
use_blkt_100 = True
# Otherwise figure out if ms_genfactmult() found exact factors.
# Otherwise write blockette 100.
else:
ms_sr = clibmseed.ms_nomsamprate(_factor.value, _multiplier.value)
# It is also necessary if the libmseed calculated sampling rate
# would result in a loss of accuracy - the floating point
# comparision is on purpose here as it will always try to
# preserve all accuracy.
# Cast to float32 to not add blockette 100 for values
# that cannot be represented with 32bits.
if np.float32(ms_sr) != np.float32(trace.stats.sampling_rate):
use_blkt_100 = True
if use_blkt_100:
size = C.sizeof(Blkt100S)
blkt100 = C.c_char(b" ")
C.memset(C.pointer(blkt100), 0, size)
ret_val = clibmseed.msr_addblockette(msr, C.pointer(blkt100), size, 100, 0) # NOQA
# Usually returns a pointer to the added blockette in the
# blockette link chain and a NULL pointer if it fails.
# NULL pointers have a false boolean value according to the
# ctypes manual.
if bool(ret_val) is False:
clibmseed.msr_free(C.pointer(msr)) # NOQA
del msr # NOQA
raise Exception("Error in msr_addblockette")
# Pack mstg into a MSEED file using the callback record_handler as
# write method.
errcode = clibmseed.mst_pack(
mst.mst,
rec_handler,
None,
trace_attr["reclen"],
trace_attr["encoding"],
trace_attr["byteorder"],
C.byref(packedsamples),
flush,
verbose,
msr,
) # NOQA
if errcode == 0:
msg = (
"Did not write any data for trace '%s' even though it "
"contains data values."
) % trace
raise ValueError(msg)
if errcode == -1:
clibmseed.msr_free(C.pointer(msr)) # NOQA
del mst, msr # NOQA
raise Exception("Error in mst_pack")
# Deallocate any allocated memory.
clibmseed.msr_free(C.pointer(msr)) # NOQA
del mst, msr # NOQA
# Close if its a file handler.
if not hasattr(filename, "write"):
f.close()
|
https://github.com/obspy/obspy/issues/2488
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "//anaconda2/lib/python2.7/site-packages/obspy/core/stream.py", line 1443, in write
write_format(self, filename, **kwargs)
File "//anaconda2/lib/python2.7/site-packages/obspy/io/mseed/core.py", line 626, in _write_mseed
(1.0 / trace.stats.sampling_rate * HPTMODULUS) % 100 != 0:
ZeroDivisionError: float division by zero
|
ZeroDivisionError
|
def recalculate_overall_sensitivity(self, frequency=None):
"""
Recalculates the overall sensitivity.
:param frequency: Choose frequency at which to calculate the
sensitivity. If not given it will be chosen automatically.
"""
if not hasattr(self, "instrument_sensitivity"):
msg = (
"Could not find an instrument sensitivity - will not "
"recalculate the overall sensitivity."
)
raise ValueError(msg)
if not self.instrument_sensitivity.input_units:
msg = (
"Could not determine input units - will not "
"recalculate the overall sensitivity."
)
raise ValueError(msg)
i_u = self.instrument_sensitivity.input_units
unit_map = {
"DISP": ["M"],
"VEL": ["M/S", "M/SEC"],
"ACC": ["M/S**2", "M/(S**2)", "M/SEC**2", "M/(SEC**2)", "M/S/S"],
}
unit = None
for key, value in unit_map.items():
if i_u and i_u.upper() in value:
unit = key
if not unit:
msg = (
"ObsPy does not know how to map unit '%s' to "
"displacement, velocity, or acceleration - overall "
"sensitivity will not be recalculated."
) % i_u
raise ValueError(msg)
# Determine frequency if not given.
if frequency is None:
# lookup normalization frequency of sensor's first stage it should
# be in the flat part of the response
stage_one = self.response_stages[0]
try:
frequency = stage_one.normalization_frequency
except AttributeError:
pass
for stage in self.response_stages[::-1]:
# determine sampling rate
try:
sampling_rate = (
stage.decimation_input_sample_rate / stage.decimation_factor
)
break
except Exception:
continue
else:
sampling_rate = None
if sampling_rate:
# if sensor's normalization frequency is above 0.5 * nyquist,
# use that instead (e.g. to avoid computing an overall
# sensitivity above nyquist)
nyquist = sampling_rate / 2.0
if frequency:
frequency = min(frequency, nyquist / 2.0)
else:
frequency = nyquist / 2.0
if frequency is None:
msg = (
"Could not automatically determine a suitable frequency "
"at which to calculate the sensitivity. The overall "
"sensitivity will not be recalculated."
)
raise ValueError(msg)
freq, gain = self._get_overall_sensitivity_and_gain(
output=unit, frequency=float(frequency)
)
self.instrument_sensitivity.value = gain
self.instrument_sensitivity.frequency = freq
|
def recalculate_overall_sensitivity(self, frequency=None):
"""
Recalculates the overall sensitivity.
:param frequency: Choose frequency at which to calculate the
sensitivity. If not given it will be chosen automatically.
"""
if not hasattr(self, "instrument_sensitivity"):
msg = (
"Could not find an instrument sensitivity - will not "
"recalculate the overall sensitivity."
)
raise ValueError(msg)
if not self.instrument_sensitivity.input_units:
msg = (
"Could not determine input units - will not "
"recalculate the overall sensitivity."
)
raise ValueError(msg)
i_u = self.instrument_sensitivity.input_units
unit_map = {
"DISP": ["M"],
"VEL": ["M/S", "M/SEC"],
"ACC": ["M/S**2", "M/(S**2)", "M/SEC**2", "M/(SEC**2)", "M/S/S"],
}
unit = None
for key, value in unit_map.items():
if i_u and i_u.upper() in value:
unit = key
if not unit:
msg = (
"ObsPy does not know how to map unit '%s' to "
"displacement, velocity, or acceleration - overall "
"sensitivity will not be recalculated."
) % i_u
raise ValueError(msg)
# Determine frequency if not given.
if frequency is None:
# lookup normalization frequency of sensor's first stage it should
# be in the flat part of the response
stage_one = self.response_stages[0]
try:
frequency = stage_one.normalization_frequency
except AttributeError:
pass
for stage in self.response_stages[::-1]:
# determine sampling rate
try:
sampling_rate = (
stage.decimation_input_sample_rate / stage.decimation_factor
)
break
except Exception:
continue
else:
sampling_rate = None
if sampling_rate:
# if sensor's normalization frequency is above 0.5 * nyquist,
# use that instead (e.g. to avoid computing an overall
# sensitivity above nyquist)
nyquist = sampling_rate / 2.0
if frequency:
frequency = min(frequency, nyquist / 2.0)
else:
frequency = nyquist / 2.0
if frequency is None:
msg = (
"Could not automatically determine a suitable frequency "
"at which to calculate the sensitivity. The overall "
"sensitivity will not be recalculated."
)
raise ValueError(msg)
freq, gain = self._get_overall_sensitivity_and_gain(
output=unit, frequency=frequency
)
self.instrument_sensitivity.value = gain
self.instrument_sensitivity.frequency = freq
|
https://github.com/obspy/obspy/issues/2338
|
In [0]: from obspy import read_inventory, UTCDateTime as UTC
In [1]: inv = read_inventory()
In [4]: rsp = inv.get_response('BW.RJOB..EHZ', UTC())
In [5]: rsp.recalculate_overall_sensitivity(5)
---------------------------------------------------------------------------
ArgumentError Traceback (most recent call last)
<ipython-input-5-9892716a93fe> in <module>()
----> 1 rsp.recalculate_overall_sensitivity(5)
~/dev/obspy/obspy/core/inventory/response.py in recalculate_overall_sensitivity(self, frequency)
1029
1030 freq, gain = self._get_overall_sensitivity_and_gain(
-> 1031 output=unit, frequency=frequency)
1032
1033 self.instrument_sensitivity.value = gain
~/dev/obspy/obspy/core/inventory/response.py in _get_overall_sensitivity_and_gain(self, frequency, output)
1064 response_at_frequency = self._call_eval_resp_for_frequencies(
1065 frequencies=[frequency], output=output,
-> 1066 hide_sensitivity_mismatch_warning=True)[0][0]
1067 overall_sensitivity = abs(response_at_frequency)
1068 return frequency, overall_sensitivity
~/dev/obspy/obspy/core/inventory/response.py in _call_eval_resp_for_frequencies(self, frequencies, output, start_stage, end_stage, hide_sensitivity_mismatch_warning)
1574 rc = clibevresp._obspy_calc_resp(C.byref(chan), frequencies,
1575 len(frequencies),
-> 1576 output, out_units, -1, 0, 0)
1577 if rc:
1578 e, m = ew.ENUM_ERROR_CODES[rc]
ArgumentError: argument 2: <class 'TypeError'>: array must have data type float64
|
ArgumentError
|
def _read_fixed_header(self):
"""
Reads the fixed header of the Mini-SEED file and writes all entries to
self.fixed_header, a dictionary.
"""
# Init empty fixed header dictionary. Use an ordered dictionary to
# achieve the same order as in the Mini-SEED manual.
self.fixed_header = OrderedDict()
# Read and unpack.
self.file.seek(self.record_offset, 0)
fixed_header = self.file.read(48)
encoding = native_str("%s20c2H3Bx2H2h4Bl2h" % self.endian)
try:
header_item = unpack(encoding, fixed_header)
except Exception:
if len(fixed_header) == 0:
msg = "Unexpected end of file."
raise IOError(msg)
raise
# Write values to dictionary.
self.fixed_header["Sequence number"] = int(
"".join(x.decode("ascii", errors="replace") for x in header_item[:6])
)
self.fixed_header["Data header/quality indicator"] = header_item[6].decode(
"ascii", errors="replace"
)
self.fixed_header["Station identifier code"] = "".join(
x.decode("ascii", errors="replace") for x in header_item[8:13]
).strip()
self.fixed_header["Location identifier"] = "".join(
x.decode("ascii", errors="replace") for x in header_item[13:15]
).strip()
self.fixed_header["Channel identifier"] = "".join(
x.decode("ascii", errors="replace") for x in header_item[15:18]
).strip()
self.fixed_header["Network code"] = "".join(
x.decode("ascii", errors="replace") for x in header_item[18:20]
).strip()
# Construct the starttime. This is only the starttime in the fixed
# header without any offset. See page 31 of the SEED manual for the
# time definition.
self.fixed_header["Record start time"] = UTCDateTime(
year=header_item[20],
julday=header_item[21],
hour=header_item[22],
minute=header_item[23],
second=header_item[24],
microsecond=header_item[25] * 100,
)
self.fixed_header["Number of samples"] = int(header_item[26])
self.fixed_header["Sample rate factor"] = int(header_item[27])
self.fixed_header["Sample rate multiplier"] = int(header_item[28])
self.fixed_header["Activity flags"] = int(header_item[29])
self.fixed_header["I/O and clock flags"] = int(header_item[30])
self.fixed_header["Data quality flags"] = int(header_item[31])
self.fixed_header["Number of blockettes that follow"] = int(header_item[32])
self.fixed_header["Time correction"] = int(header_item[33])
self.fixed_header["Beginning of data"] = int(header_item[34])
self.fixed_header["First blockette"] = int(header_item[35])
|
def _read_fixed_header(self):
"""
Reads the fixed header of the Mini-SEED file and writes all entries to
self.fixed_header, a dictionary.
"""
# Init empty fixed header dictionary. Use an ordered dictionary to
# achieve the same order as in the Mini-SEED manual.
self.fixed_header = OrderedDict()
# Read and unpack.
self.file.seek(self.record_offset, 0)
fixed_header = self.file.read(48)
encoding = native_str("%s20c2H3Bx4H4Bl2H" % self.endian)
try:
header_item = unpack(encoding, fixed_header)
except Exception:
if len(fixed_header) == 0:
msg = "Unexpected end of file."
raise IOError(msg)
raise
# Write values to dictionary.
self.fixed_header["Sequence number"] = int(
"".join(x.decode("ascii", errors="replace") for x in header_item[:6])
)
self.fixed_header["Data header/quality indicator"] = header_item[6].decode(
"ascii", errors="replace"
)
self.fixed_header["Station identifier code"] = "".join(
x.decode("ascii", errors="replace") for x in header_item[8:13]
).strip()
self.fixed_header["Location identifier"] = "".join(
x.decode("ascii", errors="replace") for x in header_item[13:15]
).strip()
self.fixed_header["Channel identifier"] = "".join(
x.decode("ascii", errors="replace") for x in header_item[15:18]
).strip()
self.fixed_header["Network code"] = "".join(
x.decode("ascii", errors="replace") for x in header_item[18:20]
).strip()
# Construct the starttime. This is only the starttime in the fixed
# header without any offset. See page 31 of the SEED manual for the
# time definition.
self.fixed_header["Record start time"] = UTCDateTime(
year=header_item[20],
julday=header_item[21],
hour=header_item[22],
minute=header_item[23],
second=header_item[24],
microsecond=header_item[25] * 100,
)
self.fixed_header["Number of samples"] = int(header_item[26])
self.fixed_header["Sample rate factor"] = int(header_item[27])
self.fixed_header["Sample rate multiplier"] = int(header_item[28])
self.fixed_header["Activity flags"] = int(header_item[29])
self.fixed_header["I/O and clock flags"] = int(header_item[30])
self.fixed_header["Data quality flags"] = int(header_item[31])
self.fixed_header["Number of blockettes that follow"] = int(header_item[32])
self.fixed_header["Time correction"] = int(header_item[33])
self.fixed_header["Beginning of data"] = int(header_item[34])
self.fixed_header["First blockette"] = int(header_item[35])
|
https://github.com/obspy/obspy/issues/2030
|
Traceback (most recent call last):
File "./10_downloader.py", line 122, in <module>
stationxml_storage=stationxml_storage)
File "/home/legovini/obspy/obspy/clients/fdsn/mass_downloader/mass_downloader.py", line 201, in download
threads_per_client=threads_per_client)
File "/home/legovini/obspy/obspy/clients/fdsn/mass_downloader/download_helpers.py", line 857, in download_mseed
[(self.client, self.client_name, chunk) for chunk in chunks])
File "/home/legovini/miniconda3/envs/sismo/lib/python3.6/multiprocessing/pool.py", line 266, in map
return self._map_async(func, iterable, mapstar, chunksize).get()
File "/home/legovini/miniconda3/envs/sismo/lib/python3.6/multiprocessing/pool.py", line 644, in get
raise self._value
File "/home/legovini/miniconda3/envs/sismo/lib/python3.6/multiprocessing/pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "/home/legovini/miniconda3/envs/sismo/lib/python3.6/multiprocessing/pool.py", line 44, in mapstar
return list(map(*args))
File "/home/legovini/obspy/obspy/clients/fdsn/mass_downloader/download_helpers.py", line 842, in star_download_mseed
*args, logger=self.logger)
File "/home/legovini/obspy/obspy/clients/fdsn/mass_downloader/utils.py", line 245, in download_and_split_mseed_bulk
c=filenames[channel_id])
File "/home/legovini/obspy/obspy/clients/fdsn/mass_downloader/utils.py", line 143, in get_filename
raise NotImplementedError
NotImplementedError
|
NotImplementedError
|
def __str__(self):
"""
Set the string representation of the class.
"""
if self.filename:
filename = self.filename
else:
filename = "Unknown"
if self.endian == "<":
endian = "Little Endian"
else:
endian = "Big Endian"
if self.did_goto:
goto_info = (
" (records were skipped, number is wrong in case of differing record sizes)"
)
else:
goto_info = ""
ret_val = (
"FILE: %s\nRecord Number: %i%s\n"
+ "Record Offset: %i byte\n"
+ "Header Endianness: %s\n\n"
) % (filename, self.record_number, goto_info, self.record_offset, endian)
ret_val += "FIXED SECTION OF DATA HEADER\n"
for key in self.fixed_header.keys():
# Don't print empty values to ease testing.
if self.fixed_header[key] != "":
ret_val += "\t%s: %s\n" % (key, self.fixed_header[key])
else:
ret_val += "\t%s:\n" % (key)
ret_val += "\nBLOCKETTES\n"
for key in self.blockettes.keys():
ret_val += "\t%i:" % key
if not len(self.blockettes[key]):
ret_val += "\tNOT YET IMPLEMENTED\n"
for _i, blkt_key in enumerate(self.blockettes[key].keys()):
if _i == 0:
tabs = "\t"
else:
tabs = "\t\t"
ret_val += "%s%s: %s\n" % (tabs, blkt_key, self.blockettes[key][blkt_key])
ret_val += "\nCALCULATED VALUES\n"
ret_val += "\tCorrected Starttime: %s\n" % self.corrected_starttime
return ret_val
|
def __str__(self):
"""
Set the string representation of the class.
"""
if self.filename:
filename = self.filename
else:
filename = "Unknown"
if self.endian == "<":
endian = "Little Endian"
else:
endian = "Big Endian"
if self.did_goto:
goto_info = (
" (records were skipped, number is wrong in case of differing record sizes)"
)
else:
goto_info = ""
ret_val = (
"FILE: %s\nRecord Number: %i%s\n"
+ "Record Offset: %i byte\n"
+ "Header Endianness: %s\n\n"
) % (filename, self.record_number, goto_info, self.record_offset, endian)
ret_val += "FIXED SECTION OF DATA HEADER\n"
for key in self.fixed_header.keys():
ret_val += "\t%s: %s\n" % (key, self.fixed_header[key])
ret_val += "\nBLOCKETTES\n"
for key in self.blockettes.keys():
ret_val += "\t%i:" % key
if not len(self.blockettes[key]):
ret_val += "\tNOT YET IMPLEMENTED\n"
for _i, blkt_key in enumerate(self.blockettes[key].keys()):
if _i == 0:
tabs = "\t"
else:
tabs = "\t\t"
ret_val += "%s%s: %s\n" % (tabs, blkt_key, self.blockettes[key][blkt_key])
ret_val += "\nCALCULATED VALUES\n"
ret_val += "\tCorrected Starttime: %s\n" % self.corrected_starttime
return ret_val
|
https://github.com/obspy/obspy/issues/2030
|
Traceback (most recent call last):
File "./10_downloader.py", line 122, in <module>
stationxml_storage=stationxml_storage)
File "/home/legovini/obspy/obspy/clients/fdsn/mass_downloader/mass_downloader.py", line 201, in download
threads_per_client=threads_per_client)
File "/home/legovini/obspy/obspy/clients/fdsn/mass_downloader/download_helpers.py", line 857, in download_mseed
[(self.client, self.client_name, chunk) for chunk in chunks])
File "/home/legovini/miniconda3/envs/sismo/lib/python3.6/multiprocessing/pool.py", line 266, in map
return self._map_async(func, iterable, mapstar, chunksize).get()
File "/home/legovini/miniconda3/envs/sismo/lib/python3.6/multiprocessing/pool.py", line 644, in get
raise self._value
File "/home/legovini/miniconda3/envs/sismo/lib/python3.6/multiprocessing/pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "/home/legovini/miniconda3/envs/sismo/lib/python3.6/multiprocessing/pool.py", line 44, in mapstar
return list(map(*args))
File "/home/legovini/obspy/obspy/clients/fdsn/mass_downloader/download_helpers.py", line 842, in star_download_mseed
*args, logger=self.logger)
File "/home/legovini/obspy/obspy/clients/fdsn/mass_downloader/utils.py", line 245, in download_and_split_mseed_bulk
c=filenames[channel_id])
File "/home/legovini/obspy/obspy/clients/fdsn/mass_downloader/utils.py", line 143, in get_filename
raise NotImplementedError
NotImplementedError
|
NotImplementedError
|
def _get_record_information(file_object, offset=0, endian=None):
"""
Searches the first MiniSEED record stored in file_object at the current
position and returns some information about it.
If offset is given, the MiniSEED record is assumed to start at current
position + offset in file_object.
:param endian: If given, the byte order will be enforced. Can be either "<"
or ">". If None, it will be determined automatically.
Defaults to None.
"""
initial_position = file_object.tell()
record_start = initial_position
samp_rate = None
info = {}
# Apply the offset.
if offset:
file_object.seek(offset, 1)
record_start += offset
# Get the size of the buffer.
file_object.seek(0, 2)
info["filesize"] = int(file_object.tell() - record_start)
file_object.seek(record_start, 0)
_code = file_object.read(8)[6:7]
# Reset the offset if starting somewhere in the middle of the file.
if info["filesize"] % 128 != 0:
# if a multiple of minimal record length 256
record_start = 0
elif _code not in [b"D", b"R", b"Q", b"M", b" "]:
# if valid data record start at all starting with D, R, Q or M
record_start = 0
# Might be a noise record or completely empty.
elif _code == b" ":
try:
_t = file_object.read(120).decode().strip()
except Exception:
raise ValueError("Invalid MiniSEED file.")
if not _t:
info = _get_record_information(file_object=file_object, endian=endian)
file_object.seek(initial_position, 0)
return info
else:
raise ValueError("Invalid MiniSEED file.")
file_object.seek(record_start, 0)
# check if full SEED or MiniSEED
if file_object.read(8)[6:7] == b"V":
# found a full SEED record - seek first MiniSEED record
# search blockette 005, 008 or 010 which contain the record length
blockette_id = file_object.read(3)
while blockette_id not in [b"010", b"008", b"005"]:
if not blockette_id.startswith(b"0"):
msg = (
"SEED Volume Index Control Headers: blockette 0xx"
+ " expected, got %s"
)
raise Exception(msg % blockette_id)
# get length and jump to end of current blockette
blockette_len = int(file_object.read(4))
file_object.seek(blockette_len - 7, 1)
# read next blockette id
blockette_id = file_object.read(3)
# Skip the next bytes containing length of the blockette and version
file_object.seek(8, 1)
# get record length
rec_len = pow(2, int(file_object.read(2)))
# reset file pointer
file_object.seek(record_start, 0)
# cycle through file using record length until first data record found
while file_object.read(7)[6:7] not in [b"D", b"R", b"Q", b"M"]:
record_start += rec_len
file_object.seek(record_start, 0)
# Jump to the network, station, location and channel codes.
file_object.seek(record_start + 8, 0)
data = file_object.read(12)
info["station"] = _decode_header_field("station", data[:5].strip())
info["location"] = _decode_header_field("location", data[5:7].strip())
info["channel"] = _decode_header_field("channel", data[7:10].strip())
info["network"] = _decode_header_field("network", data[10:12].strip())
# Use the date to figure out the byte order.
file_object.seek(record_start + 20, 0)
# Capital letters indicate unsigned quantities.
data = file_object.read(28)
def fmt(s):
return native_str("%sHHBBBxHHhhBBBxlxxH" % s)
def _parse_time(values):
if not (1 <= values[1] <= 366):
msg = "julday out of bounds (wrong endian?): {!s}".format(values[1])
raise InternalMSEEDParseTimeError(msg)
# The spec says values[5] (.0001 seconds) must be between 0-9999 but
# we've encountered files which have a value of 10000. We interpret
# this as an additional second. The approach here is general enough
# to work for any value of values[5].
msec = values[5] * 100
offset = msec // 1000000
if offset:
warnings.warn(
"Record contains a fractional seconds (.0001 secs) of %i - "
"the maximum strictly allowed value is 9999. It will be "
"interpreted as one or more additional seconds." % values[5],
category=UserWarning,
)
try:
t = (
UTCDateTime(
year=values[0],
julday=values[1],
hour=values[2],
minute=values[3],
second=values[4],
microsecond=msec % 1000000,
)
+ offset
)
except TypeError:
msg = "Problem decoding time (wrong endian?)"
raise InternalMSEEDParseTimeError(msg)
return t
if endian is None:
try:
endian = ">"
values = unpack(fmt(endian), data)
starttime = _parse_time(values)
except InternalMSEEDParseTimeError:
endian = "<"
values = unpack(fmt(endian), data)
starttime = _parse_time(values)
else:
values = unpack(fmt(endian), data)
try:
starttime = _parse_time(values)
except InternalMSEEDParseTimeError:
msg = "Invalid starttime found. The passed byte order is likely wrong."
raise ValueError(msg)
npts = values[6]
info["npts"] = npts
samp_rate_factor = values[7]
samp_rate_mult = values[8]
info["activity_flags"] = values[9]
# Bit 1 of the activity flags.
time_correction_applied = bool(info["activity_flags"] & 2)
info["io_and_clock_flags"] = values[10]
info["data_quality_flags"] = values[11]
info["time_correction"] = values[12]
time_correction = values[12]
blkt_offset = values[13]
# Correct the starttime if applicable.
if (time_correction_applied is False) and time_correction:
# Time correction is in units of 0.0001 seconds.
starttime += time_correction * 0.0001
# Traverse the blockettes and parse Blockettes 100, 500, 1000 and/or 1001
# if any of those is found.
while blkt_offset:
file_object.seek(record_start + blkt_offset, 0)
blkt_type, next_blkt = unpack(native_str("%sHH" % endian), file_object.read(4))
if next_blkt != 0 and (next_blkt < 4 or next_blkt - 4 <= blkt_offset):
msg = (
"Invalid blockette offset (%d) less than or equal to "
"current offset (%d)"
) % (next_blkt, blkt_offset)
raise ValueError(msg)
blkt_offset = next_blkt
# Parse in order of likeliness.
if blkt_type == 1000:
encoding, word_order, record_length = unpack(
native_str("%sBBB" % endian), file_object.read(3)
)
if word_order not in ENDIAN:
msg = (
'Invalid word order "%s" in blockette 1000 for '
"record with ID %s.%s.%s.%s at offset %i."
) % (
str(word_order),
info["network"],
info["station"],
info["location"],
info["channel"],
offset,
)
warnings.warn(msg, UserWarning)
elif ENDIAN[word_order] != endian:
msg = "Inconsistent word order."
warnings.warn(msg, UserWarning)
info["encoding"] = encoding
info["record_length"] = 2**record_length
elif blkt_type == 1001:
info["timing_quality"], mu_sec = unpack(
native_str("%sBb" % endian), file_object.read(2)
)
starttime += float(mu_sec) / 1e6
elif blkt_type == 500:
file_object.seek(14, 1)
mu_sec = unpack(native_str("%sb" % endian), file_object.read(1))[0]
starttime += float(mu_sec) / 1e6
elif blkt_type == 100:
samp_rate = unpack(native_str("%sf" % endian), file_object.read(4))[0]
# No blockette 1000 found.
if "record_length" not in info:
file_object.seek(record_start, 0)
# Read 16 kb - should be a safe maximal record length.
buf = from_buffer(file_object.read(2**14), dtype=np.int8)
# This is a messy check - we just delegate to libmseed.
reclen = clibmseed.ms_detect(buf, len(buf))
if reclen < 0:
raise ValueError("Could not detect data record.")
elif reclen == 0:
# It might be at the end of the file.
if len(buf) in [2**_i for _i in range(7, 256)]:
reclen = len(buf)
else:
raise ValueError("Could not determine record length.")
info["record_length"] = reclen
# If samprate not set via blockette 100 calculate the sample rate according
# to the SEED manual.
if not samp_rate:
if (samp_rate_factor > 0) and (samp_rate_mult) > 0:
samp_rate = float(samp_rate_factor * samp_rate_mult)
elif (samp_rate_factor > 0) and (samp_rate_mult) < 0:
samp_rate = -1.0 * float(samp_rate_factor) / float(samp_rate_mult)
elif (samp_rate_factor < 0) and (samp_rate_mult) > 0:
samp_rate = -1.0 * float(samp_rate_mult) / float(samp_rate_factor)
elif (samp_rate_factor < 0) and (samp_rate_mult) < 0:
samp_rate = 1.0 / float(samp_rate_factor * samp_rate_mult)
else:
samp_rate = 0
info["samp_rate"] = samp_rate
info["starttime"] = starttime
# If sample rate is zero set endtime to startime
if samp_rate == 0:
info["endtime"] = starttime
# Endtime is the time of the last sample.
else:
info["endtime"] = starttime + (npts - 1) / samp_rate
info["byteorder"] = endian
info["number_of_records"] = int(info["filesize"] // info["record_length"])
info["excess_bytes"] = int(info["filesize"] % info["record_length"])
# Reset file pointer.
file_object.seek(initial_position, 0)
return info
|
def _get_record_information(file_object, offset=0, endian=None):
"""
Searches the first MiniSEED record stored in file_object at the current
position and returns some information about it.
If offset is given, the MiniSEED record is assumed to start at current
position + offset in file_object.
:param endian: If given, the byte order will be enforced. Can be either "<"
or ">". If None, it will be determined automatically.
Defaults to None.
"""
initial_position = file_object.tell()
record_start = initial_position
samp_rate = None
info = {}
# Apply the offset.
if offset:
file_object.seek(offset, 1)
record_start += offset
# Get the size of the buffer.
file_object.seek(0, 2)
info["filesize"] = int(file_object.tell() - record_start)
file_object.seek(record_start, 0)
_code = file_object.read(8)[6:7]
# Reset the offset if starting somewhere in the middle of the file.
if info["filesize"] % 128 != 0:
# if a multiple of minimal record length 256
record_start = 0
elif _code not in [b"D", b"R", b"Q", b"M", b" "]:
# if valid data record start at all starting with D, R, Q or M
record_start = 0
# Might be a noise record or completely empty.
elif _code == b" ":
try:
_t = file_object.read(120).decode().strip()
except Exception:
raise ValueError("Invalid MiniSEED file.")
if not _t:
info = _get_record_information(file_object=file_object, endian=endian)
file_object.seek(initial_position, 0)
return info
else:
raise ValueError("Invalid MiniSEED file.")
file_object.seek(record_start, 0)
# check if full SEED or MiniSEED
if file_object.read(8)[6:7] == b"V":
# found a full SEED record - seek first MiniSEED record
# search blockette 005, 008 or 010 which contain the record length
blockette_id = file_object.read(3)
while blockette_id not in [b"010", b"008", b"005"]:
if not blockette_id.startswith(b"0"):
msg = (
"SEED Volume Index Control Headers: blockette 0xx"
+ " expected, got %s"
)
raise Exception(msg % blockette_id)
# get length and jump to end of current blockette
blockette_len = int(file_object.read(4))
file_object.seek(blockette_len - 7, 1)
# read next blockette id
blockette_id = file_object.read(3)
# Skip the next bytes containing length of the blockette and version
file_object.seek(8, 1)
# get record length
rec_len = pow(2, int(file_object.read(2)))
# reset file pointer
file_object.seek(record_start, 0)
# cycle through file using record length until first data record found
while file_object.read(7)[6:7] not in [b"D", b"R", b"Q", b"M"]:
record_start += rec_len
file_object.seek(record_start, 0)
# Jump to the network, station, location and channel codes.
file_object.seek(record_start + 8, 0)
data = file_object.read(12)
info["station"] = _decode_header_field("station", data[:5].strip())
info["location"] = _decode_header_field("location", data[5:7].strip())
info["channel"] = _decode_header_field("channel", data[7:10].strip())
info["network"] = _decode_header_field("network", data[10:12].strip())
# Use the date to figure out the byte order.
file_object.seek(record_start + 20, 0)
# Capital letters indicate unsigned quantities.
data = file_object.read(28)
def fmt(s):
return native_str("%sHHBBBxHHhhBBBxlxxH" % s)
def _parse_time(values):
if not (1 <= values[1] <= 366):
msg = "julday out of bounds (wrong endian?): {!s}".format(values[1])
raise InternalMSEEDParseTimeError(msg)
# The spec says values[5] (.0001 seconds) must be between 0-9999 but
# we've encountered files which have a value of 10000. We interpret
# this as an additional second. The approach here is general enough
# to work for any value of values[5].
msec = values[5] * 100
offset = msec // 1000000
if offset:
warnings.warn(
"Record contains a fractional seconds (.0001 secs) of %i - "
"the maximum strictly allowed value is 9999. It will be "
"interpreted as one or more additional seconds." % values[5],
category=UserWarning,
)
try:
t = (
UTCDateTime(
year=values[0],
julday=values[1],
hour=values[2],
minute=values[3],
second=values[4],
microsecond=msec % 1000000,
)
+ offset
)
except TypeError:
msg = "Problem decoding time (wrong endian?)"
raise InternalMSEEDParseTimeError(msg)
return t
if endian is None:
try:
endian = ">"
values = unpack(fmt(endian), data)
starttime = _parse_time(values)
except InternalMSEEDParseTimeError:
endian = "<"
values = unpack(fmt(endian), data)
starttime = _parse_time(values)
else:
values = unpack(fmt(endian), data)
try:
starttime = _parse_time(values)
except InternalMSEEDParseTimeError:
msg = "Invalid starttime found. The passed byte order is likely wrong."
raise ValueError(msg)
npts = values[6]
info["npts"] = npts
samp_rate_factor = values[7]
samp_rate_mult = values[8]
info["activity_flags"] = values[9]
# Bit 1 of the activity flags.
time_correction_applied = bool(info["activity_flags"] & 2)
info["io_and_clock_flags"] = values[10]
info["data_quality_flags"] = values[11]
info["time_correction"] = values[12]
time_correction = values[12]
blkt_offset = values[13]
# Correct the starttime if applicable.
if (time_correction_applied is False) and time_correction:
# Time correction is in units of 0.0001 seconds.
starttime += time_correction * 0.0001
# Traverse the blockettes and parse Blockettes 100, 500, 1000 and/or 1001
# if any of those is found.
while blkt_offset:
file_object.seek(record_start + blkt_offset, 0)
blkt_type, next_blkt = unpack(native_str("%sHH" % endian), file_object.read(4))
if next_blkt != 0 and (next_blkt < 4 or next_blkt - 4 <= blkt_offset):
msg = (
"Invalid blockette offset (%d) less than or equal to "
"current offset (%d)"
) % (next_blkt, blkt_offset)
raise ValueError(msg)
blkt_offset = next_blkt
# Parse in order of likeliness.
if blkt_type == 1000:
encoding, word_order, record_length = unpack(
native_str("%sBBB" % endian), file_object.read(3)
)
if word_order not in ENDIAN:
msg = (
'Invalid word order "%s" in blockette 1000 for '
"record with ID %s.%s.%s.%s at offset %i."
) % (
str(word_order),
info["network"],
info["station"],
info["location"],
info["channel"],
offset,
)
warnings.warn(msg, UserWarning)
elif ENDIAN[word_order] != endian:
msg = "Inconsistent word order."
warnings.warn(msg, UserWarning)
info["encoding"] = encoding
info["record_length"] = 2**record_length
elif blkt_type == 1001:
info["timing_quality"], mu_sec = unpack(
native_str("%sBb" % endian), file_object.read(2)
)
starttime += float(mu_sec) / 1e6
elif blkt_type == 500:
file_object.seek(14, 1)
mu_sec = unpack(native_str("%sb" % endian), file_object.read(1))[0]
starttime += float(mu_sec) / 1e6
elif blkt_type == 100:
samp_rate = unpack(native_str("%sf" % endian), file_object.read(4))[0]
# No blockette 1000 found.
if "record_length" not in info:
file_object.seek(record_start, 0)
# Read 16 kb - should be a safe maximal record length.
buf = from_buffer(file_object.read(2**14), dtype=np.int8)
# This is a messy check - we just delegate to libmseed.
reclen = clibmseed.ms_detect(buf, len(buf))
if reclen < 0:
raise ValueError("Could not detect data record.")
elif reclen == 0:
# It might be at the end of the file.
if len(buf) in [2**_i for _i in range(7, 256)]:
reclen = len(buf)
else:
raise ValueError("Could not determine record length.")
info["record_length"] = reclen
# If samprate not set via blockette 100 calculate the sample rate according
# to the SEED manual.
if not samp_rate:
if (samp_rate_factor > 0) and (samp_rate_mult) > 0:
samp_rate = float(samp_rate_factor * samp_rate_mult)
elif (samp_rate_factor > 0) and (samp_rate_mult) < 0:
samp_rate = -1.0 * float(samp_rate_factor) / float(samp_rate_mult)
elif (samp_rate_factor < 0) and (samp_rate_mult) > 0:
samp_rate = -1.0 * float(samp_rate_mult) / float(samp_rate_factor)
elif (samp_rate_factor < 0) and (samp_rate_mult) < 0:
samp_rate = -1.0 / float(samp_rate_factor * samp_rate_mult)
else:
samp_rate = 0
info["samp_rate"] = samp_rate
info["starttime"] = starttime
# If sample rate is zero set endtime to startime
if samp_rate == 0:
info["endtime"] = starttime
# Endtime is the time of the last sample.
else:
info["endtime"] = starttime + (npts - 1) / samp_rate
info["byteorder"] = endian
info["number_of_records"] = int(info["filesize"] // info["record_length"])
info["excess_bytes"] = int(info["filesize"] % info["record_length"])
# Reset file pointer.
file_object.seek(initial_position, 0)
return info
|
https://github.com/obspy/obspy/issues/2030
|
Traceback (most recent call last):
File "./10_downloader.py", line 122, in <module>
stationxml_storage=stationxml_storage)
File "/home/legovini/obspy/obspy/clients/fdsn/mass_downloader/mass_downloader.py", line 201, in download
threads_per_client=threads_per_client)
File "/home/legovini/obspy/obspy/clients/fdsn/mass_downloader/download_helpers.py", line 857, in download_mseed
[(self.client, self.client_name, chunk) for chunk in chunks])
File "/home/legovini/miniconda3/envs/sismo/lib/python3.6/multiprocessing/pool.py", line 266, in map
return self._map_async(func, iterable, mapstar, chunksize).get()
File "/home/legovini/miniconda3/envs/sismo/lib/python3.6/multiprocessing/pool.py", line 644, in get
raise self._value
File "/home/legovini/miniconda3/envs/sismo/lib/python3.6/multiprocessing/pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "/home/legovini/miniconda3/envs/sismo/lib/python3.6/multiprocessing/pool.py", line 44, in mapstar
return list(map(*args))
File "/home/legovini/obspy/obspy/clients/fdsn/mass_downloader/download_helpers.py", line 842, in star_download_mseed
*args, logger=self.logger)
File "/home/legovini/obspy/obspy/clients/fdsn/mass_downloader/utils.py", line 245, in download_and_split_mseed_bulk
c=filenames[channel_id])
File "/home/legovini/obspy/obspy/clients/fdsn/mass_downloader/utils.py", line 143, in get_filename
raise NotImplementedError
NotImplementedError
|
NotImplementedError
|
def __setitem__(self, key, value):
""" """
# keys which need to refresh derived values
if key in ["delta", "sampling_rate", "starttime", "npts"]:
# ensure correct data type
if key == "delta":
key = "sampling_rate"
try:
value = 1.0 / float(value)
except ZeroDivisionError:
value = 0.0
elif key == "sampling_rate":
value = float(value)
elif key == "starttime":
value = UTCDateTime(value)
elif key == "npts":
if not isinstance(value, int):
value = int(value)
# set current key
super(Stats, self).__setitem__(key, value)
# set derived value: delta
try:
delta = 1.0 / float(self.sampling_rate)
except ZeroDivisionError:
delta = 0
self.__dict__["delta"] = delta
# set derived value: endtime
if self.npts == 0:
timediff = 0
else:
timediff = float(self.npts - 1) * delta
self.__dict__["endtime"] = self.starttime + timediff
return
# prevent a calibration factor of 0
if key == "calib" and value == 0:
msg = "Calibration factor set to 0.0!"
warnings.warn(msg, UserWarning)
# all other keys
if isinstance(value, dict):
super(Stats, self).__setitem__(key, AttribDict(value))
else:
super(Stats, self).__setitem__(key, value)
|
def __setitem__(self, key, value):
""" """
# keys which need to refresh derived values
if key in ["delta", "sampling_rate", "starttime", "npts"]:
# ensure correct data type
if key == "delta":
key = "sampling_rate"
value = 1.0 / float(value)
elif key == "sampling_rate":
value = float(value)
elif key == "starttime":
value = UTCDateTime(value)
elif key == "npts":
if not isinstance(value, int):
value = int(value)
# set current key
super(Stats, self).__setitem__(key, value)
# set derived value: delta
try:
delta = 1.0 / float(self.sampling_rate)
except ZeroDivisionError:
delta = 0
self.__dict__["delta"] = delta
# set derived value: endtime
if self.npts == 0:
timediff = 0
else:
timediff = float(self.npts - 1) * delta
self.__dict__["endtime"] = self.starttime + timediff
return
# prevent a calibration factor of 0
if key == "calib" and value == 0:
msg = "Calibration factor set to 0.0!"
warnings.warn(msg, UserWarning)
# all other keys
if isinstance(value, dict):
super(Stats, self).__setitem__(key, AttribDict(value))
else:
super(Stats, self).__setitem__(key, value)
|
https://github.com/obspy/obspy/issues/1989
|
---------------------------------------------------------------------------
ZeroDivisionError Traceback (most recent call last)
<ipython-input-7-c105e6dd98fb> in <module>()
----> 1 pickle.loads(pickle.dumps(t))
~/code/obspy/obspy/core/util/attribdict.py in __setstate__(self, adict)
113 self.__dict__.update(self.defaults)
114 # update with pickle dictionary
--> 115 self.update(adict)
116
117 def __getattr__(self, name, default=None):
~/code/obspy/obspy/core/util/attribdict.py in update(self, adict)
140 if key in self.readonly:
141 continue
--> 142 self.__setitem__(key, value)
143
144 def _pretty_str(self, priorized_keys=[], min_label_length=16):
~/code/obspy/obspy/core/trace.py in __setitem__(self, key, value)
159 if key == 'delta':
160 key = 'sampling_rate'
--> 161 value = 1.0 / float(value)
162 elif key == 'sampling_rate':
163 value = float(value)
ZeroDivisionError: float division by zero
|
ZeroDivisionError
|
def _split_routing_response(data):
"""
Splits the routing responses per data center for the EIDAWS output.
Returns a dictionary with the keys being the root URLs of the fdsnws
endpoints and the values the data payloads for that endpoint.
:param data: The return value from the EIDAWS routing service.
"""
split = collections.defaultdict(list)
current_key = None
for line in data.splitlines():
line = line.strip()
if not line:
continue
if "http" in line and "fdsnws" in line:
current_key = line[: line.rfind("/fdsnws")]
continue
split[current_key].append(line)
return {k: "\n".join(v) for k, v in split.items()}
|
def _split_routing_response(data):
"""
Splits the routing responses per data center for the EIDAWS output.
Returns a dictionary with the keys being the root URLs of the fdsnws
endpoints and the values the data payloads for that endpoint.
:param data: The return value from the EIDAWS routing service.
"""
split = collections.defaultdict(list)
current_key = None
for line in data.splitlines():
line = line.strip()
if not line:
continue
if "http" in line and "fdsnws" in line:
current_key = line[: line.find("/fdsnws")]
continue
split[current_key].append(line)
return {k: "\n".join(v) for k, v in split.items()}
|
https://github.com/obspy/obspy/issues/1954
|
Downloading http://service.iris.edu/irisws/fedcatalog/1/query ...
Sending along the following payload:
----------------------------------------------------------------------
level=station
latitude=30
longitude=14
maxradius=10
format=request
* * * * 2017-10-20T00:00:00.000000 *
----------------------------------------------------------------------
Installed new opener with handlers: [<obspy.clients.fdsn.client.CustomRedirectHandler object at 0x7f56ff9d3cf8>]
Base URL: http://eida.gein.noa.gr
Request Headers: {'User-Agent': 'ObsPy/1.1.0rc7.post0+27.g04bbbf2540.obspy.read.isf (Linux-4.9.0-0.bpo.3-amd64-x86_64-with-debian-8.8, Python 3.6.3)'}
Installed new opener with handlers: [<obspy.clients.fdsn.client.CustomRedirectHandler object at 0x7f56ff9ee048>]
Installed new opener with handlers: [<obspy.clients.fdsn.client.CustomRedirectHandler object at 0x7f56ff9e3198>]
Installed new opener with handlers: [<obspy.clients.fdsn.client.CustomRedirectHandler object at 0x7f56ff9e38d0>]
Downloading http://eida.gein.noa.gr/fdsnws/dataselect/1/application.wadl with requesting gzip compression
Downloading http://eida.gein.noa.gr/fdsnws/event/1/application.wadl with requesting gzip compression
Base URL: http://service.iris.edu
Request Headers: {'User-Agent': 'ObsPy/1.1.0rc7.post0+27.g04bbbf2540.obspy.read.isf (Linux-4.9.0-0.bpo.3-amd64-x86_64-with-debian-8.8, Python 3.6.3)'}
Base URL: http://geofon.gfz-potsdam.de
Downloading http://eida.gein.noa.gr/fdsnws/event/1/catalogs with requesting gzip compression
Base URL: http://webservices.ingv.it
Downloading http://eida.gein.noa.gr/fdsnws/station/1/application.wadl with requesting gzip compression
Downloading http://service.iris.edu/fdsnws/event/1/catalogs with requesting gzip compression
Downloading http://service.iris.edu/fdsnws/dataselect/1/application.wadl with requesting gzip compression
Downloading http://service.iris.edu/fdsnws/station/1/application.wadl with requesting gzip compression
Request Headers: {'User-Agent': 'ObsPy/1.1.0rc7.post0+27.g04bbbf2540.obspy.read.isf (Linux-4.9.0-0.bpo.3-amd64-x86_64-with-debian-8.8, Python 3.6.3)'}
Downloading http://eida.gein.noa.gr/fdsnws/event/1/contributors with requesting gzip compression
Request Headers: {'User-Agent': 'ObsPy/1.1.0rc7.post0+27.g04bbbf2540.obspy.read.isf (Linux-4.9.0-0.bpo.3-amd64-x86_64-with-debian-8.8, Python 3.6.3)'}
Downloading http://service.iris.edu/fdsnws/event/1/contributors with requesting gzip compression
Downloading http://webservices.ingv.it/fdsnws/dataselect/1/application.wadl with requesting gzip compression
Downloading http://webservices.ingv.it/fdsnws/event/1/application.wadl with requesting gzip compression
Downloading http://webservices.ingv.it/fdsnws/station/1/application.wadl with requesting gzip compression
Downloading http://service.iris.edu/fdsnws/event/1/application.wadl with requesting gzip compression
Downloading http://geofon.gfz-potsdam.de/fdsnws/dataselect/1/application.wadl with requesting gzip compression
Downloading http://webservices.ingv.it/fdsnws/event/1/contributors with requesting gzip compression
Downloading http://geofon.gfz-potsdam.de/fdsnws/event/1/application.wadl with requesting gzip compression
Downloading http://geofon.gfz-potsdam.de/fdsnws/station/1/application.wadl with requesting gzip compression
Downloading http://webservices.ingv.it/fdsnws/event/1/catalogs with requesting gzip compression
Downloading http://geofon.gfz-potsdam.de/fdsnws/event/1/catalogs with requesting gzip compression
Downloading http://geofon.gfz-potsdam.de/fdsnws/event/1/contributors with requesting gzip compression
HTTP error 404, reason Not Found, while downloading 'http://geofon.gfz-potsdam.de/fdsnws/event/1/application.wadl': b'\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x03]\x8e1\x0e\xc20\x0cEw\x9f\xc2\x17\xa0M\xa0\x08\xc8\x0e#\x03-\xec\x81\x18\x88\xd4&`\xa7\x85\xe3c\t\x89\x01y\xf9\x92\x9f\xfe\x7f[\xe6\xcc\xd8\x98\xc6\xe1>\x17\xdc\xe51\x05\x80\xeeN\xc8\xf4\x1cI\n\x05M\x92G\xbe\x10\x86L\x82I1zG)\x98\x13\x96{\x14\x14\xe2\x89\xb8\x028\x8a\xbf)E\xc5\xc7^\xd03\xa1\x9f4\xfasOx\xe5<`}\r\x92^R\xd3D\xa9\xd4\xb6\x068|W\x1c\xfc\xbf\xfc\xe3\xd1\xc7\x8b/1\xa7\xea\xe5C\xffC\xb1\x1d\xcfC,*\xe6`n\xecjf\xcdl\xbe\xe8\xcc\xd2\x19\xeb\x9aue7\x9b\xb5\xb5\x00\xadZE\x95>\x11\x8b\x968\xb0\x95\x1e|\x00\xfa\xde\x86\x12\xf1\x00\x00\x00'
Uncompressing gzipped response for http://geofon.gfz-potsdam.de/fdsnws/station/1/application.wadl
Downloaded http://geofon.gfz-potsdam.de/fdsnws/station/1/application.wadl with HTTP code: 200
Uncompressing gzipped response for http://geofon.gfz-potsdam.de/fdsnws/dataselect/1/application.wadl
Downloaded http://geofon.gfz-potsdam.de/fdsnws/dataselect/1/application.wadl with HTTP code: 200
Uncompressing gzipped response for http://eida.gein.noa.gr/fdsnws/dataselect/1/application.wadl
Downloaded http://eida.gein.noa.gr/fdsnws/dataselect/1/application.wadl with HTTP code: 200
Uncompressing gzipped response for http://eida.gein.noa.gr/fdsnws/station/1/application.wadl
Downloaded http://eida.gein.noa.gr/fdsnws/station/1/application.wadl with HTTP code: 200
HTTP error 404, reason Not Found, while downloading 'http://eida.gein.noa.gr/fdsnws/event/1/application.wadl': b'Error 404: Not Found\n\nThe requested resource does not exist on this server.\n\nUsage details are available from /fdsnws/event/1/\n\nRequest:\n/fdsnws/event/1/application.wadl\n\nRequest Submitted:\n2017-10-23T05:01:48.260069\n\nService Version:\n1.1.0\n'
HTTP error 404, reason Not Found, while downloading 'http://eida.gein.noa.gr/fdsnws/event/1/contributors': b'Error 404: Not Found\n\nThe requested resource does not exist on this server.\n\nUsage details are available from /fdsnws/event/1/\n\nRequest:\n/fdsnws/event/1/contributors\n\nRequest Submitted:\n2017-10-23T05:01:48.261548\n\nService Version:\n1.1.0\n'
HTTP error 404, reason Not Found, while downloading 'http://eida.gein.noa.gr/fdsnws/event/1/catalogs': b'Error 404: Not Found\n\nThe requested resource does not exist on this server.\n\nUsage details are available from /fdsnws/event/1/\n\nRequest:\n/fdsnws/event/1/catalogs\n\nRequest Submitted:\n2017-10-23T05:01:48.26089\n\nService Version:\n1.1.0\n'
Discovered dataselect service
Discovered station service
Storing discovered services in cache.
Downloading http://eida.gein.noa.gr/fdsnws/station/1/query with requesting gzip compression
Sending along the following payload:
----------------------------------------------------------------------
level=station
latitude=30
longitude=14
maxradius=10
HC GVDS * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HC KLMT * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HC KNDR * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HC KTHR * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HC RODP * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL ANKY * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL GVD * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL IMMV * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL ITM * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL KEF3 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL KEF4 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL KEF5 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL KTHA * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL LXRA * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL PYL1 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL PYL2 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL PYL3 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL PYL4 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL PYL5 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL PYL6 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL PYL7 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL PYL8 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL PYL9 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL VLMS * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL VLS * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HP LTHK * * 2017-10-20T00:00:00 2599-12-31T23:59:59
----------------------------------------------------------------------
HTTP error 404, reason Not Found, while downloading 'http://geofon.gfz-potsdam.de/fdsnws/event/1/contributors': b'\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x03]\x8e\xbdn\xc30\x0c\x84w>\x05_\xc0\xb6\xe4\xa8?\xd0\xde\x8e\x1db\xb7\xbb\x1c3\xb5\x80DDI\xca\xc9\xe3G@\x81\x0e\xc5-\x07\xdc\x07\xdc\xf7&\xc2\x82\xc1\x85\x88\x1fl\xf8\xce\xb5\xac\x00\xf3F(\xf4SI\x8d\xd6\xd6\x94\xab\x9c\x08W&\xc5\xd20\xbag5\xe4\x82\xb6eE%\xd9Iz\x80OM\xdf\x8d"K\xf9\xa2\x98\x840\xed\xad\xa6\xe5Bx\x16\xbe\xe2p^\xb5\xdct\xa0\x9d\x8a\r~\x008\xfe\xbeD\xf8?\x9d\xb8\x98\xe4\xa5\x1a\x8b\xfea8\xd5\xe5\x9a\xadIE\x18\x9d\x7f\xe9\xbc\xeb\xc6\xc3\xec\x9e\xa2\xf31\xbc\xf6\x07\xe7\xc6\xe7\x0005\xa3\xdc\x84\xbfH4s\x89\xe0\xfb\x16x\x00\xea\xd1\xf22\xed\x00\x00\x00'
HTTP error 404, reason Not Found, while downloading 'http://geofon.gfz-potsdam.de/fdsnws/event/1/catalogs': b'\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x03]\x8e\xcd\n\xc20\x10\x84\xef\xfb\x14\xfb\x02\xb6I\x1bQr\xd7\xa3\x07\xff\xee\xd1nk\xa0\xcd\xe2nZ}|\x03\x82\x07\x99\xcb\xc0|0\xdfN\x84\x05\x9dq\x1e\x0f\x9cq\xcfs\xea\x00\xce\x0fB\xa1\xe7L\x9a\xa9+My\x96;a\xc7\xa4\x98\nF\xef\xa8\x199a~DE%YH*\x80\x8b\x86\xa1P\x94C\x1c\x15\x83\x10\x86\xa5\xd4p\x1b\t{\xe1\t\xeb\xbe\xd3\xf4\xd2\x9a\x16J\xb9\xb65\xc0\xf1\xfb\xe2\xe1\x7f\xba\x87\x1cF\x1e\xf4\x87\xe0i\xbeM1\x17!\x0f\x8d\xb1\x9b\x955\xab\xa6=\x9b\xb57\xd6\xbbm\xd5\x1agZ\x07p*6\xb1\xc8^I4r\xf2`\xab\x12\xf8\x00\xda\xeae\x17\xe9\x00\x00\x00'
Discovered station service
Discovered dataselect service
Storing discovered services in cache.
Downloading http://geofon.gfz-potsdam.de/fdsnws/station/1/query with requesting gzip compression
Sending along the following payload:
----------------------------------------------------------------------
level=station
latitude=30
longitude=14
maxradius=10
GE GVD * * 2017-10-20T00:00:00 2599-12-31T23:59:59
GE IMMV * * 2017-10-20T00:00:00 2599-12-31T23:59:59
GE KERA * * 2017-10-20T00:00:00 2599-12-31T23:59:59
GE KTHA * * 2017-10-20T00:00:00 2599-12-31T23:59:59
----------------------------------------------------------------------
Downloaded http://webservices.ingv.it/fdsnws/event/1/contributors with HTTP code: 200
Uncompressing gzipped response for http://webservices.ingv.it/fdsnws/dataselect/1/application.wadl
Downloaded http://webservices.ingv.it/fdsnws/dataselect/1/application.wadl with HTTP code: 200
Uncompressing gzipped response for http://webservices.ingv.it/fdsnws/event/1/application.wadl
Downloaded http://webservices.ingv.it/fdsnws/event/1/application.wadl with HTTP code: 200
Uncompressing gzipped response for http://webservices.ingv.it/fdsnws/station/1/application.wadl
Downloaded http://webservices.ingv.it/fdsnws/station/1/application.wadl with HTTP code: 200
Downloaded http://webservices.ingv.it/fdsnws/event/1/catalogs with HTTP code: 200
Discovered dataselect service
Discovered event service
Discovered station service
Storing discovered services in cache.
Downloading http://webservices.ingv.it/fdsnws/station/1/query with requesting gzip compression
Sending along the following payload:
----------------------------------------------------------------------
level=station
latitude=30
longitude=14
maxradius=10
IV AGST * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV AIO * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV ALJA * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV ATN * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV CAGR * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV CAR1 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV CAVT * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV CELI * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV CET2 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV CLTA * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV CMDO * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV CORL * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV CRJA * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV CSLB * * 2017-10-20T00:00:00 2099-10-05T00:00:00
IV ECNV * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV ECTS * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV EMCN * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV EMSG * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV ENIC * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV EPOZ * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV EPZF * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV ERC * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV ESLN * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV ESML * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV EVRN * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV FAVR * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV GALF * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV GIB * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV GMB * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV GRI * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV GRIS * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV HAGA * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV HAVL * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV HBSP * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV HCRL * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV HLNI * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV HMDC * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV HPAC * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV HVZN * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV IACL * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV IFIL * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV ILLI * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV IST3 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV ISTR * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV IVGP * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV IVPL * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV JOPP * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV LADO * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV LINA * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV LPDG * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MARS * * 2017-10-20T09:42:00 2599-12-31T23:59:59
IV MCPD * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MCSR * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MCT * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MEU * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MFNL * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MILZ * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MMGO * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MNO * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MPAZ * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MPG * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MPNC * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MSCL * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MSFR * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MSRU * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MTGR * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MTTG * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MUCR * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV NOV * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV PETRA * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV PIPA * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV PLAC * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV PLLN * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV PTMD * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV RAFF * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV RESU * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV SERS * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV SN1 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV SOI * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV SOLUN * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV SPS2 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV SSY * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV STR4 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV TDS * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV USI * * 2017-10-20T00:00:00 2599-12-31T23:59:59
MN AIO * * 2017-10-20T00:00:00 2599-12-31T23:59:59
MN CEL * * 2017-10-20T00:00:00 2599-12-31T23:59:59
MN CLTB * * 2017-10-20T00:00:00 2599-12-31T23:59:59
MN TIP * * 2017-10-20T00:00:00 2599-12-31T23:59:59
MN VAE * * 2017-10-20T00:00:00 2599-12-31T23:59:59
MN WDD * * 2017-10-20T00:00:00 2599-12-31T23:59:59
----------------------------------------------------------------------
Uncompressing gzipped response for http://eida.gein.noa.gr/fdsnws/station/1/query
Downloaded http://eida.gein.noa.gr/fdsnws/station/1/query with HTTP code: 200
Uncompressing gzipped response for http://service.iris.edu/fdsnws/station/1/application.wadl
Uncompressing gzipped response for http://geofon.gfz-potsdam.de/fdsnws/station/1/query
Downloaded http://geofon.gfz-potsdam.de/fdsnws/station/1/query with HTTP code: 200
Uncompressing gzipped response for http://service.iris.edu/fdsnws/event/1/application.wadl
Downloaded http://service.iris.edu/fdsnws/station/1/application.wadl with HTTP code: 200
Downloaded http://service.iris.edu/fdsnws/event/1/application.wadl with HTTP code: 200
Uncompressing gzipped response for http://webservices.ingv.it/fdsnws/station/1/query
Downloaded http://webservices.ingv.it/fdsnws/station/1/query with HTTP code: 200
Uncompressing gzipped response for http://service.iris.edu/fdsnws/dataselect/1/application.wadl
Downloaded http://service.iris.edu/fdsnws/dataselect/1/application.wadl with HTTP code: 200
Downloaded http://service.iris.edu/fdsnws/event/1/catalogs with HTTP code: 200
Uncompressing gzipped response for http://service.iris.edu/fdsnws/event/1/contributors
Downloaded http://service.iris.edu/fdsnws/event/1/contributors with HTTP code: 200
Discovered station service
Discovered event service
Discovered dataselect service
Storing discovered services in cache.
Downloading http://service.iris.edu/fdsnws/station/1/query with requesting gzip compression
Sending along the following payload:
----------------------------------------------------------------------
level=station
latitude=30
longitude=14
maxradius=10
MN GFA * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY AIO * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY CEL * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY CLTB * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY GFA * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY GHAR * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY GVD * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY ITM * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY KARN * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY KERA * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY MARJ * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY SKD * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY TATN * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY TIP * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY VAE * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY WDD * * 2017-10-20T00:00:00 2599-12-31T23:59:59
TT TAMR * * 2017-10-20T00:00:00 2599-12-31T23:59:59
TT TATN * * 2017-10-20T00:00:00 2599-12-31T23:59:59
TT THTN * * 2017-10-20T00:00:00 2599-12-31T23:59:59
----------------------------------------------------------------------
Uncompressing gzipped response for http://service.iris.edu/fdsnws/station/1/query
Downloaded http://service.iris.edu/fdsnws/station/1/query with HTTP code: 200
Traceback (most recent call last):
File "fdsn_bug.py", line 11, in <module>
latitude=30, longitude=14, maxradius=10)
File "<decorator-gen-63>", line 2, in get_stations
File "/home/megies/anaconda/envs/1.1.0py3/lib/python3.6/site-packages/obspy/clients/fdsn/routing/routing_client.py", line 78, in _assert_filename_not_in_kwargs
return f(*args, **kwargs)
File "/home/megies/anaconda/envs/1.1.0py3/lib/python3.6/site-packages/obspy/clients/fdsn/routing/federator_routing_client.py", line 110, in get_stations
return super(FederatorRoutingClient, self).get_stations(**kwargs)
File "<decorator-gen-60>", line 2, in get_stations
File "/home/megies/anaconda/envs/1.1.0py3/lib/python3.6/site-packages/obspy/clients/fdsn/routing/routing_client.py", line 78, in _assert_filename_not_in_kwargs
return f(*args, **kwargs)
File "/home/megies/anaconda/envs/1.1.0py3/lib/python3.6/site-packages/obspy/clients/fdsn/routing/routing_client.py", line 345, in get_stations
return self.get_stations_bulk([bulk], **kwargs)
File "<decorator-gen-64>", line 2, in get_stations_bulk
File "/home/megies/anaconda/envs/1.1.0py3/lib/python3.6/site-packages/obspy/clients/fdsn/routing/routing_client.py", line 78, in _assert_filename_not_in_kwargs
return f(*args, **kwargs)
File "/home/megies/anaconda/envs/1.1.0py3/lib/python3.6/site-packages/obspy/clients/fdsn/routing/federator_routing_client.py", line 147, in get_stations_bulk
return self._download_stations(split, **kwargs)
File "/home/megies/anaconda/envs/1.1.0py3/lib/python3.6/site-packages/obspy/clients/fdsn/routing/routing_client.py", line 219, in _download_stations
return self._download_parallel(split, data_type="station", **kwargs)
File "/home/megies/anaconda/envs/1.1.0py3/lib/python3.6/site-packages/obspy/clients/fdsn/routing/routing_client.py", line 245, in _download_parallel
results = pool.map(_download_bulk, dl_requests)
File "/home/megies/anaconda/envs/1.1.0py3/lib/python3.6/multiprocessing/pool.py", line 266, in map
return self._map_async(func, iterable, mapstar, chunksize).get()
File "/home/megies/anaconda/envs/1.1.0py3/lib/python3.6/multiprocessing/pool.py", line 644, in get
raise self._value
File "/home/megies/anaconda/envs/1.1.0py3/lib/python3.6/multiprocessing/pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "/home/megies/anaconda/envs/1.1.0py3/lib/python3.6/multiprocessing/pool.py", line 44, in mapstar
return list(map(*args))
File "/home/megies/anaconda/envs/1.1.0py3/lib/python3.6/site-packages/obspy/clients/fdsn/routing/routing_client.py", line 96, in _download_bulk
**credentials)
File "/home/megies/anaconda/envs/1.1.0py3/lib/python3.6/site-packages/obspy/clients/fdsn/client.py", line 229, in __init__
raise ValueError(msg)
ValueError: The FDSN service base URL `http:` is not a valid URL.
|
ValueError
|
def _split_routing_response(data, service):
"""
Splits the routing responses per data center for the federator output.
Returns a dictionary with the keys being the root URLs of the fdsnws
endpoints and the values the data payloads for that endpoint.
:param data: The return value from the EIDAWS routing service.
"""
if service.lower() == "dataselect":
key = "DATASELECTSERVICE"
elif service.lower() == "station":
key = "STATIONSERVICE"
else:
raise ValueError("Service must be 'dataselect' or 'station'.")
split = collections.defaultdict(list)
current_key = None
for line in data.splitlines():
line = line.strip()
if not line:
continue
if "http://" in line:
if key not in line:
continue
current_key = line[len(key) + 1 : line.rfind("/fdsnws")]
continue
# Anything before the first data center can be ignored.
if current_key is None:
continue
split[current_key].append(line)
return {k: "\n".join(v) for k, v in split.items()}
|
def _split_routing_response(data, service):
"""
Splits the routing responses per data center for the federator output.
Returns a dictionary with the keys being the root URLs of the fdsnws
endpoints and the values the data payloads for that endpoint.
:param data: The return value from the EIDAWS routing service.
"""
if service.lower() == "dataselect":
key = "DATASELECTSERVICE"
elif service.lower() == "station":
key = "STATIONSERVICE"
else:
raise ValueError("Service must be 'dataselect' or 'station'.")
split = collections.defaultdict(list)
current_key = None
for line in data.splitlines():
line = line.strip()
if not line:
continue
if "http://" in line:
if key not in line:
continue
current_key = line[len(key) + 1 : line.find("/fdsnws")]
continue
# Anything before the first data center can be ignored.
if current_key is None:
continue
split[current_key].append(line)
return {k: "\n".join(v) for k, v in split.items()}
|
https://github.com/obspy/obspy/issues/1954
|
Downloading http://service.iris.edu/irisws/fedcatalog/1/query ...
Sending along the following payload:
----------------------------------------------------------------------
level=station
latitude=30
longitude=14
maxradius=10
format=request
* * * * 2017-10-20T00:00:00.000000 *
----------------------------------------------------------------------
Installed new opener with handlers: [<obspy.clients.fdsn.client.CustomRedirectHandler object at 0x7f56ff9d3cf8>]
Base URL: http://eida.gein.noa.gr
Request Headers: {'User-Agent': 'ObsPy/1.1.0rc7.post0+27.g04bbbf2540.obspy.read.isf (Linux-4.9.0-0.bpo.3-amd64-x86_64-with-debian-8.8, Python 3.6.3)'}
Installed new opener with handlers: [<obspy.clients.fdsn.client.CustomRedirectHandler object at 0x7f56ff9ee048>]
Installed new opener with handlers: [<obspy.clients.fdsn.client.CustomRedirectHandler object at 0x7f56ff9e3198>]
Installed new opener with handlers: [<obspy.clients.fdsn.client.CustomRedirectHandler object at 0x7f56ff9e38d0>]
Downloading http://eida.gein.noa.gr/fdsnws/dataselect/1/application.wadl with requesting gzip compression
Downloading http://eida.gein.noa.gr/fdsnws/event/1/application.wadl with requesting gzip compression
Base URL: http://service.iris.edu
Request Headers: {'User-Agent': 'ObsPy/1.1.0rc7.post0+27.g04bbbf2540.obspy.read.isf (Linux-4.9.0-0.bpo.3-amd64-x86_64-with-debian-8.8, Python 3.6.3)'}
Base URL: http://geofon.gfz-potsdam.de
Downloading http://eida.gein.noa.gr/fdsnws/event/1/catalogs with requesting gzip compression
Base URL: http://webservices.ingv.it
Downloading http://eida.gein.noa.gr/fdsnws/station/1/application.wadl with requesting gzip compression
Downloading http://service.iris.edu/fdsnws/event/1/catalogs with requesting gzip compression
Downloading http://service.iris.edu/fdsnws/dataselect/1/application.wadl with requesting gzip compression
Downloading http://service.iris.edu/fdsnws/station/1/application.wadl with requesting gzip compression
Request Headers: {'User-Agent': 'ObsPy/1.1.0rc7.post0+27.g04bbbf2540.obspy.read.isf (Linux-4.9.0-0.bpo.3-amd64-x86_64-with-debian-8.8, Python 3.6.3)'}
Downloading http://eida.gein.noa.gr/fdsnws/event/1/contributors with requesting gzip compression
Request Headers: {'User-Agent': 'ObsPy/1.1.0rc7.post0+27.g04bbbf2540.obspy.read.isf (Linux-4.9.0-0.bpo.3-amd64-x86_64-with-debian-8.8, Python 3.6.3)'}
Downloading http://service.iris.edu/fdsnws/event/1/contributors with requesting gzip compression
Downloading http://webservices.ingv.it/fdsnws/dataselect/1/application.wadl with requesting gzip compression
Downloading http://webservices.ingv.it/fdsnws/event/1/application.wadl with requesting gzip compression
Downloading http://webservices.ingv.it/fdsnws/station/1/application.wadl with requesting gzip compression
Downloading http://service.iris.edu/fdsnws/event/1/application.wadl with requesting gzip compression
Downloading http://geofon.gfz-potsdam.de/fdsnws/dataselect/1/application.wadl with requesting gzip compression
Downloading http://webservices.ingv.it/fdsnws/event/1/contributors with requesting gzip compression
Downloading http://geofon.gfz-potsdam.de/fdsnws/event/1/application.wadl with requesting gzip compression
Downloading http://geofon.gfz-potsdam.de/fdsnws/station/1/application.wadl with requesting gzip compression
Downloading http://webservices.ingv.it/fdsnws/event/1/catalogs with requesting gzip compression
Downloading http://geofon.gfz-potsdam.de/fdsnws/event/1/catalogs with requesting gzip compression
Downloading http://geofon.gfz-potsdam.de/fdsnws/event/1/contributors with requesting gzip compression
HTTP error 404, reason Not Found, while downloading 'http://geofon.gfz-potsdam.de/fdsnws/event/1/application.wadl': b'\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x03]\x8e1\x0e\xc20\x0cEw\x9f\xc2\x17\xa0M\xa0\x08\xc8\x0e#\x03-\xec\x81\x18\x88\xd4&`\xa7\x85\xe3c\t\x89\x01y\xf9\x92\x9f\xfe\x7f[\xe6\xcc\xd8\x98\xc6\xe1>\x17\xdc\xe51\x05\x80\xeeN\xc8\xf4\x1cI\n\x05M\x92G\xbe\x10\x86L\x82I1zG)\x98\x13\x96{\x14\x14\xe2\x89\xb8\x028\x8a\xbf)E\xc5\xc7^\xd03\xa1\x9f4\xfasOx\xe5<`}\r\x92^R\xd3D\xa9\xd4\xb6\x068|W\x1c\xfc\xbf\xfc\xe3\xd1\xc7\x8b/1\xa7\xea\xe5C\xffC\xb1\x1d\xcfC,*\xe6`n\xecjf\xcdl\xbe\xe8\xcc\xd2\x19\xeb\x9aue7\x9b\xb5\xb5\x00\xadZE\x95>\x11\x8b\x968\xb0\x95\x1e|\x00\xfa\xde\x86\x12\xf1\x00\x00\x00'
Uncompressing gzipped response for http://geofon.gfz-potsdam.de/fdsnws/station/1/application.wadl
Downloaded http://geofon.gfz-potsdam.de/fdsnws/station/1/application.wadl with HTTP code: 200
Uncompressing gzipped response for http://geofon.gfz-potsdam.de/fdsnws/dataselect/1/application.wadl
Downloaded http://geofon.gfz-potsdam.de/fdsnws/dataselect/1/application.wadl with HTTP code: 200
Uncompressing gzipped response for http://eida.gein.noa.gr/fdsnws/dataselect/1/application.wadl
Downloaded http://eida.gein.noa.gr/fdsnws/dataselect/1/application.wadl with HTTP code: 200
Uncompressing gzipped response for http://eida.gein.noa.gr/fdsnws/station/1/application.wadl
Downloaded http://eida.gein.noa.gr/fdsnws/station/1/application.wadl with HTTP code: 200
HTTP error 404, reason Not Found, while downloading 'http://eida.gein.noa.gr/fdsnws/event/1/application.wadl': b'Error 404: Not Found\n\nThe requested resource does not exist on this server.\n\nUsage details are available from /fdsnws/event/1/\n\nRequest:\n/fdsnws/event/1/application.wadl\n\nRequest Submitted:\n2017-10-23T05:01:48.260069\n\nService Version:\n1.1.0\n'
HTTP error 404, reason Not Found, while downloading 'http://eida.gein.noa.gr/fdsnws/event/1/contributors': b'Error 404: Not Found\n\nThe requested resource does not exist on this server.\n\nUsage details are available from /fdsnws/event/1/\n\nRequest:\n/fdsnws/event/1/contributors\n\nRequest Submitted:\n2017-10-23T05:01:48.261548\n\nService Version:\n1.1.0\n'
HTTP error 404, reason Not Found, while downloading 'http://eida.gein.noa.gr/fdsnws/event/1/catalogs': b'Error 404: Not Found\n\nThe requested resource does not exist on this server.\n\nUsage details are available from /fdsnws/event/1/\n\nRequest:\n/fdsnws/event/1/catalogs\n\nRequest Submitted:\n2017-10-23T05:01:48.26089\n\nService Version:\n1.1.0\n'
Discovered dataselect service
Discovered station service
Storing discovered services in cache.
Downloading http://eida.gein.noa.gr/fdsnws/station/1/query with requesting gzip compression
Sending along the following payload:
----------------------------------------------------------------------
level=station
latitude=30
longitude=14
maxradius=10
HC GVDS * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HC KLMT * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HC KNDR * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HC KTHR * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HC RODP * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL ANKY * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL GVD * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL IMMV * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL ITM * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL KEF3 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL KEF4 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL KEF5 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL KTHA * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL LXRA * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL PYL1 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL PYL2 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL PYL3 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL PYL4 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL PYL5 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL PYL6 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL PYL7 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL PYL8 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL PYL9 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL VLMS * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HL VLS * * 2017-10-20T00:00:00 2599-12-31T23:59:59
HP LTHK * * 2017-10-20T00:00:00 2599-12-31T23:59:59
----------------------------------------------------------------------
HTTP error 404, reason Not Found, while downloading 'http://geofon.gfz-potsdam.de/fdsnws/event/1/contributors': b'\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x03]\x8e\xbdn\xc30\x0c\x84w>\x05_\xc0\xb6\xe4\xa8?\xd0\xde\x8e\x1db\xb7\xbb\x1c3\xb5\x80DDI\xca\xc9\xe3G@\x81\x0e\xc5-\x07\xdc\x07\xdc\xf7&\xc2\x82\xc1\x85\x88\x1fl\xf8\xce\xb5\xac\x00\xf3F(\xf4SI\x8d\xd6\xd6\x94\xab\x9c\x08W&\xc5\xd20\xbag5\xe4\x82\xb6eE%\xd9Iz\x80OM\xdf\x8d"K\xf9\xa2\x98\x840\xed\xad\xa6\xe5Bx\x16\xbe\xe2p^\xb5\xdct\xa0\x9d\x8a\r~\x008\xfe\xbeD\xf8?\x9d\xb8\x98\xe4\xa5\x1a\x8b\xfea8\xd5\xe5\x9a\xadIE\x18\x9d\x7f\xe9\xbc\xeb\xc6\xc3\xec\x9e\xa2\xf31\xbc\xf6\x07\xe7\xc6\xe7\x0005\xa3\xdc\x84\xbfH4s\x89\xe0\xfb\x16x\x00\xea\xd1\xf22\xed\x00\x00\x00'
HTTP error 404, reason Not Found, while downloading 'http://geofon.gfz-potsdam.de/fdsnws/event/1/catalogs': b'\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x03]\x8e\xcd\n\xc20\x10\x84\xef\xfb\x14\xfb\x02\xb6I\x1bQr\xd7\xa3\x07\xff\xee\xd1nk\xa0\xcd\xe2nZ}|\x03\x82\x07\x99\xcb\xc0|0\xdfN\x84\x05\x9dq\x1e\x0f\x9cq\xcfs\xea\x00\xce\x0fB\xa1\xe7L\x9a\xa9+My\x96;a\xc7\xa4\x98\nF\xef\xa8\x199a~DE%YH*\x80\x8b\x86\xa1P\x94C\x1c\x15\x83\x10\x86\xa5\xd4p\x1b\t{\xe1\t\xeb\xbe\xd3\xf4\xd2\x9a\x16J\xb9\xb65\xc0\xf1\xfb\xe2\xe1\x7f\xba\x87\x1cF\x1e\xf4\x87\xe0i\xbeM1\x17!\x0f\x8d\xb1\x9b\x955\xab\xa6=\x9b\xb57\xd6\xbbm\xd5\x1agZ\x07p*6\xb1\xc8^I4r\xf2`\xab\x12\xf8\x00\xda\xeae\x17\xe9\x00\x00\x00'
Discovered station service
Discovered dataselect service
Storing discovered services in cache.
Downloading http://geofon.gfz-potsdam.de/fdsnws/station/1/query with requesting gzip compression
Sending along the following payload:
----------------------------------------------------------------------
level=station
latitude=30
longitude=14
maxradius=10
GE GVD * * 2017-10-20T00:00:00 2599-12-31T23:59:59
GE IMMV * * 2017-10-20T00:00:00 2599-12-31T23:59:59
GE KERA * * 2017-10-20T00:00:00 2599-12-31T23:59:59
GE KTHA * * 2017-10-20T00:00:00 2599-12-31T23:59:59
----------------------------------------------------------------------
Downloaded http://webservices.ingv.it/fdsnws/event/1/contributors with HTTP code: 200
Uncompressing gzipped response for http://webservices.ingv.it/fdsnws/dataselect/1/application.wadl
Downloaded http://webservices.ingv.it/fdsnws/dataselect/1/application.wadl with HTTP code: 200
Uncompressing gzipped response for http://webservices.ingv.it/fdsnws/event/1/application.wadl
Downloaded http://webservices.ingv.it/fdsnws/event/1/application.wadl with HTTP code: 200
Uncompressing gzipped response for http://webservices.ingv.it/fdsnws/station/1/application.wadl
Downloaded http://webservices.ingv.it/fdsnws/station/1/application.wadl with HTTP code: 200
Downloaded http://webservices.ingv.it/fdsnws/event/1/catalogs with HTTP code: 200
Discovered dataselect service
Discovered event service
Discovered station service
Storing discovered services in cache.
Downloading http://webservices.ingv.it/fdsnws/station/1/query with requesting gzip compression
Sending along the following payload:
----------------------------------------------------------------------
level=station
latitude=30
longitude=14
maxradius=10
IV AGST * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV AIO * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV ALJA * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV ATN * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV CAGR * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV CAR1 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV CAVT * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV CELI * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV CET2 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV CLTA * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV CMDO * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV CORL * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV CRJA * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV CSLB * * 2017-10-20T00:00:00 2099-10-05T00:00:00
IV ECNV * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV ECTS * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV EMCN * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV EMSG * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV ENIC * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV EPOZ * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV EPZF * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV ERC * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV ESLN * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV ESML * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV EVRN * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV FAVR * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV GALF * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV GIB * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV GMB * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV GRI * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV GRIS * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV HAGA * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV HAVL * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV HBSP * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV HCRL * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV HLNI * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV HMDC * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV HPAC * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV HVZN * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV IACL * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV IFIL * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV ILLI * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV IST3 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV ISTR * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV IVGP * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV IVPL * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV JOPP * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV LADO * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV LINA * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV LPDG * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MARS * * 2017-10-20T09:42:00 2599-12-31T23:59:59
IV MCPD * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MCSR * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MCT * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MEU * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MFNL * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MILZ * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MMGO * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MNO * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MPAZ * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MPG * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MPNC * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MSCL * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MSFR * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MSRU * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MTGR * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MTTG * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV MUCR * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV NOV * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV PETRA * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV PIPA * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV PLAC * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV PLLN * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV PTMD * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV RAFF * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV RESU * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV SERS * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV SN1 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV SOI * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV SOLUN * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV SPS2 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV SSY * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV STR4 * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV TDS * * 2017-10-20T00:00:00 2599-12-31T23:59:59
IV USI * * 2017-10-20T00:00:00 2599-12-31T23:59:59
MN AIO * * 2017-10-20T00:00:00 2599-12-31T23:59:59
MN CEL * * 2017-10-20T00:00:00 2599-12-31T23:59:59
MN CLTB * * 2017-10-20T00:00:00 2599-12-31T23:59:59
MN TIP * * 2017-10-20T00:00:00 2599-12-31T23:59:59
MN VAE * * 2017-10-20T00:00:00 2599-12-31T23:59:59
MN WDD * * 2017-10-20T00:00:00 2599-12-31T23:59:59
----------------------------------------------------------------------
Uncompressing gzipped response for http://eida.gein.noa.gr/fdsnws/station/1/query
Downloaded http://eida.gein.noa.gr/fdsnws/station/1/query with HTTP code: 200
Uncompressing gzipped response for http://service.iris.edu/fdsnws/station/1/application.wadl
Uncompressing gzipped response for http://geofon.gfz-potsdam.de/fdsnws/station/1/query
Downloaded http://geofon.gfz-potsdam.de/fdsnws/station/1/query with HTTP code: 200
Uncompressing gzipped response for http://service.iris.edu/fdsnws/event/1/application.wadl
Downloaded http://service.iris.edu/fdsnws/station/1/application.wadl with HTTP code: 200
Downloaded http://service.iris.edu/fdsnws/event/1/application.wadl with HTTP code: 200
Uncompressing gzipped response for http://webservices.ingv.it/fdsnws/station/1/query
Downloaded http://webservices.ingv.it/fdsnws/station/1/query with HTTP code: 200
Uncompressing gzipped response for http://service.iris.edu/fdsnws/dataselect/1/application.wadl
Downloaded http://service.iris.edu/fdsnws/dataselect/1/application.wadl with HTTP code: 200
Downloaded http://service.iris.edu/fdsnws/event/1/catalogs with HTTP code: 200
Uncompressing gzipped response for http://service.iris.edu/fdsnws/event/1/contributors
Downloaded http://service.iris.edu/fdsnws/event/1/contributors with HTTP code: 200
Discovered station service
Discovered event service
Discovered dataselect service
Storing discovered services in cache.
Downloading http://service.iris.edu/fdsnws/station/1/query with requesting gzip compression
Sending along the following payload:
----------------------------------------------------------------------
level=station
latitude=30
longitude=14
maxradius=10
MN GFA * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY AIO * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY CEL * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY CLTB * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY GFA * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY GHAR * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY GVD * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY ITM * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY KARN * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY KERA * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY MARJ * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY SKD * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY TATN * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY TIP * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY VAE * * 2017-10-20T00:00:00 2599-12-31T23:59:59
SY WDD * * 2017-10-20T00:00:00 2599-12-31T23:59:59
TT TAMR * * 2017-10-20T00:00:00 2599-12-31T23:59:59
TT TATN * * 2017-10-20T00:00:00 2599-12-31T23:59:59
TT THTN * * 2017-10-20T00:00:00 2599-12-31T23:59:59
----------------------------------------------------------------------
Uncompressing gzipped response for http://service.iris.edu/fdsnws/station/1/query
Downloaded http://service.iris.edu/fdsnws/station/1/query with HTTP code: 200
Traceback (most recent call last):
File "fdsn_bug.py", line 11, in <module>
latitude=30, longitude=14, maxradius=10)
File "<decorator-gen-63>", line 2, in get_stations
File "/home/megies/anaconda/envs/1.1.0py3/lib/python3.6/site-packages/obspy/clients/fdsn/routing/routing_client.py", line 78, in _assert_filename_not_in_kwargs
return f(*args, **kwargs)
File "/home/megies/anaconda/envs/1.1.0py3/lib/python3.6/site-packages/obspy/clients/fdsn/routing/federator_routing_client.py", line 110, in get_stations
return super(FederatorRoutingClient, self).get_stations(**kwargs)
File "<decorator-gen-60>", line 2, in get_stations
File "/home/megies/anaconda/envs/1.1.0py3/lib/python3.6/site-packages/obspy/clients/fdsn/routing/routing_client.py", line 78, in _assert_filename_not_in_kwargs
return f(*args, **kwargs)
File "/home/megies/anaconda/envs/1.1.0py3/lib/python3.6/site-packages/obspy/clients/fdsn/routing/routing_client.py", line 345, in get_stations
return self.get_stations_bulk([bulk], **kwargs)
File "<decorator-gen-64>", line 2, in get_stations_bulk
File "/home/megies/anaconda/envs/1.1.0py3/lib/python3.6/site-packages/obspy/clients/fdsn/routing/routing_client.py", line 78, in _assert_filename_not_in_kwargs
return f(*args, **kwargs)
File "/home/megies/anaconda/envs/1.1.0py3/lib/python3.6/site-packages/obspy/clients/fdsn/routing/federator_routing_client.py", line 147, in get_stations_bulk
return self._download_stations(split, **kwargs)
File "/home/megies/anaconda/envs/1.1.0py3/lib/python3.6/site-packages/obspy/clients/fdsn/routing/routing_client.py", line 219, in _download_stations
return self._download_parallel(split, data_type="station", **kwargs)
File "/home/megies/anaconda/envs/1.1.0py3/lib/python3.6/site-packages/obspy/clients/fdsn/routing/routing_client.py", line 245, in _download_parallel
results = pool.map(_download_bulk, dl_requests)
File "/home/megies/anaconda/envs/1.1.0py3/lib/python3.6/multiprocessing/pool.py", line 266, in map
return self._map_async(func, iterable, mapstar, chunksize).get()
File "/home/megies/anaconda/envs/1.1.0py3/lib/python3.6/multiprocessing/pool.py", line 644, in get
raise self._value
File "/home/megies/anaconda/envs/1.1.0py3/lib/python3.6/multiprocessing/pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "/home/megies/anaconda/envs/1.1.0py3/lib/python3.6/multiprocessing/pool.py", line 44, in mapstar
return list(map(*args))
File "/home/megies/anaconda/envs/1.1.0py3/lib/python3.6/site-packages/obspy/clients/fdsn/routing/routing_client.py", line 96, in _download_bulk
**credentials)
File "/home/megies/anaconda/envs/1.1.0py3/lib/python3.6/site-packages/obspy/clients/fdsn/client.py", line 229, in __init__
raise ValueError(msg)
ValueError: The FDSN service base URL `http:` is not a valid URL.
|
ValueError
|
def _run_indexer(options):
logging.info("Starting indexer %s:%s ..." % (options.host, options.port))
# initialize crawler
service = WaveformIndexer((options.host, options.port), MyHandler)
service.log = logging
try:
# prepare paths
if "," in options.data:
paths = options.data.split(",")
else:
paths = [options.data]
paths = service._prepare_paths(paths)
if not paths:
return
# prepare map file
if options.mapping_file:
with open(options.mapping_file, "r") as f:
data = f.readlines()
mappings = parse_mapping_data(data)
logging.info(
"Parsed %d lines from mapping file %s"
% (len(data), options.mapping_file)
)
else:
mappings = {}
# create file queue and worker processes
manager = multiprocessing.Manager()
in_queue = manager.dict()
work_queue = manager.list()
out_queue = manager.list()
log_queue = manager.list()
# spawn processes
for i in range(options.number_of_cpus):
args = (i, in_queue, work_queue, out_queue, log_queue, mappings)
p = multiprocessing.Process(target=worker, args=args)
p.daemon = True
p.start()
# connect to database
engine = create_engine(
options.db_uri, encoding=native_str("utf-8"), convert_unicode=True
)
metadata = Base.metadata
# recreate database
if options.drop_database:
metadata.drop_all(engine, checkfirst=True)
metadata.create_all(engine, checkfirst=True)
# initialize database + options
_session = sessionmaker(bind=engine)
service.session = _session
service.options = options
service.mappings = mappings
# set queues
service.input_queue = in_queue
service.work_queue = work_queue
service.output_queue = out_queue
service.log_queue = log_queue
service.paths = paths
service._reset_walker()
service._step_walker()
service.serve_forever(options.poll_interval)
except KeyboardInterrupt:
quit()
logging.info("Indexer stopped.")
|
def _run_indexer(options):
logging.info("Starting indexer %s:%s ..." % (options.host, options.port))
# initialize crawler
service = WaveformIndexer((options.host, options.port), MyHandler)
service.log = logging
try:
# prepare paths
if "," in options.data:
paths = options.data.split(",")
else:
paths = [options.data]
paths = service._prepare_paths(paths)
if not paths:
return
# prepare map file
if options.mapping_file:
with open(options.mapping_file, "r") as f:
data = f.readlines()
mappings = parse_mapping_data(data)
logging.info(
"Parsed %d lines from mapping file %s"
% (len(data), options.mapping_file)
)
else:
mappings = {}
# create file queue and worker processes
manager = multiprocessing.Manager()
in_queue = manager.dict()
work_queue = manager.list()
out_queue = manager.list()
log_queue = manager.list()
# spawn processes
for i in range(options.number_of_cpus):
args = (i, in_queue, work_queue, out_queue, log_queue, mappings)
p = multiprocessing.Process(target=worker, args=args)
p.daemon = True
p.start()
# connect to database
engine = create_engine(options.db_uri, encoding="utf-8", convert_unicode=True)
metadata = Base.metadata
# recreate database
if options.drop_database:
metadata.drop_all(engine, checkfirst=True)
metadata.create_all(engine, checkfirst=True)
# initialize database + options
_session = sessionmaker(bind=engine)
service.session = _session
service.options = options
service.mappings = mappings
# set queues
service.input_queue = in_queue
service.work_queue = work_queue
service.output_queue = out_queue
service.log_queue = log_queue
service.paths = paths
service._reset_walker()
service._step_walker()
service.serve_forever(options.poll_interval)
except KeyboardInterrupt:
quit()
logging.info("Indexer stopped.")
|
https://github.com/obspy/obspy/issues/1369
|
2016-04-12 11:47:36,562 [INFO] Starting indexer localhost:0 ...
Traceback (most recent call last):
File "/home/richter/anaconda/bin/obspy-indexer", line 9, in <module>
load_entry_point('obspy==1.0.1', 'console_scripts', 'obspy-indexer')()
File "/home/richter/anaconda/lib/python2.7/site-packages/obspy/db/scripts/indexer.py", line 259, in main
_run_indexer(args)
File "/home/richter/anaconda/lib/python2.7/site-packages/obspy/db/scripts/indexer.py", line 146, in _run_indexer
convert_unicode=True)
File "/home/richter/anaconda/lib/python2.7/site-packages/sqlalchemy/engine/__init__.py", line 386, in create_engine
return strategy.create(*args, **kwargs)
File "/home/richter/anaconda/lib/python2.7/site-packages/sqlalchemy/engine/strategies.py", line 80, in create
dialect = dialect_cls(**dialect_args)
File "/home/richter/anaconda/lib/python2.7/site-packages/sqlalchemy/dialects/postgresql/psycopg2.py", line 546, in __init__
PGDialect.__init__(self, **kwargs)
File "/home/richter/anaconda/lib/python2.7/site-packages/sqlalchemy/dialects/postgresql/base.py", line 2022, in __init__
default.DefaultDialect.__init__(self, **kwargs)
File "/home/richter/anaconda/lib/python2.7/site-packages/sqlalchemy/engine/default.py", line 213, in __init__
encoding
File "/home/richter/anaconda/lib/python2.7/site-packages/sqlalchemy/processors.py", line 138, in to_unicode_processor_factory
return UnicodeResultProcessor(encoding).process
TypeError: __init__() argument 1 must be string, not unicode
|
TypeError
|
def depthIncCheck(self):
"""
Check that no slowness layer is too thick.
The maximum is determined by ``self.maxDepthInterval``.
"""
for wave in [self.SWAVE, self.PWAVE]:
# These might change with calls to addSlowness, so be sure we have
# the correct copy.
if wave == self.PWAVE:
layers = self.PLayers
else:
layers = self.SLayers
diff = layers["botDepth"] - layers["topDepth"]
mask = diff > self.maxDepthInterval
diff = diff[mask]
topDepth = layers["topDepth"][mask]
new_count = np.ceil(diff / self.maxDepthInterval).astype(np.int_)
steps = diff / new_count
for start, Nd, delta in zip(topDepth, new_count, steps):
new_depth = start + np.arange(1, Nd) * delta
if wave == self.SWAVE:
velocity = self.vMod.evaluateAbove(new_depth, "S")
smask = velocity == 0
if not self.allowInnerCoreS:
smask |= new_depth >= self.vMod.iocbDepth
if np.any(smask):
velocity[smask] = self.vMod.evaluateAbove(new_depth[smask], "P")
slowness = self.toSlowness(velocity, new_depth)
else:
slowness = self.toSlowness(
self.vMod.evaluateAbove(new_depth, "P"), new_depth
)
for p in slowness:
self.addSlowness(p, self.PWAVE)
self.addSlowness(p, self.SWAVE)
|
def depthIncCheck(self):
"""
Check that no slowness layer is too thick.
The maximum is determined by ``self.maxDepthInterval``.
"""
for wave in [self.SWAVE, self.PWAVE]:
# These might change with calls to addSlowness, so be sure we have
# the correct copy.
if wave == self.PWAVE:
layers = self.PLayers
else:
layers = self.SLayers
diff = layers["botDepth"] - layers["topDepth"]
mask = diff > self.maxDepthInterval
diff = diff[mask]
topDepth = layers["topDepth"][mask]
new_count = np.ceil(diff / self.maxDepthInterval).astype(np.int_)
steps = diff / new_count
for start, Nd, delta in zip(topDepth, new_count, steps):
new_depth = start + np.arange(1, Nd) * delta
if wave == self.SWAVE:
velocity = self.vMod.evaluateAbove(new_depth, "S")
smask = velocity == 0
if not self.allowInnerCoreS:
smask |= new_depth >= self.vMod.iocbDepth
velocity[smask] = self.vMod.evaluateAbove(new_depth[smask], "P")
slowness = self.toSlowness(velocity, new_depth)
else:
slowness = self.toSlowness(
self.vMod.evaluateAbove(new_depth, "P"), new_depth
)
for p in slowness:
self.addSlowness(p, self.PWAVE)
self.addSlowness(p, self.SWAVE)
|
https://github.com/obspy/obspy/issues/1195
|
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/Users/lion/workspace/code/obspy/obspy/taup/taup_create.py", line 119, in run
self.tMod = self.createTauModel(self.vMod)
File "/Users/lion/workspace/code/obspy/obspy/taup/taup_create.py", line 85, in createTauModel
SlownessModel.DEFAULT_SLOWNESS_TOLERANCE)
File "/Users/lion/workspace/code/obspy/obspy/taup/slowness_model.py", line 76, in __init__
self.createSample()
File "/Users/lion/workspace/code/obspy/obspy/taup/slowness_model.py", line 153, in createSample
self.depthIncCheck()
File "/Users/lion/workspace/code/obspy/obspy/taup/slowness_model.py", line 1012, in depthIncCheck
'P')
File "/Users/lion/workspace/code/obspy/obspy/taup/velocity_model.py", line 185, in evaluateAbove
layer = self.layers[self.layerNumberAbove(depth)]
File "/Users/lion/workspace/code/obspy/obspy/taup/velocity_model.py", line 132, in layerNumberAbove
raise LookupError("No such layer.")
LookupError: No such layer.
|
LookupError
|
def plot(self, *args, **kwargs):
"""
Creates a waveform plot of the current ObsPy Stream object.
:param outfile: Output file string. Also used to automatically
determine the output format. Supported file formats depend on your
matplotlib backend. Most backends support png, pdf, ps, eps and
svg. Defaults to ``None``.
:param format: Format of the graph picture. If no format is given the
outfile parameter will be used to try to automatically determine
the output format. If no format is found it defaults to png output.
If no outfile is specified but a format is, than a binary
imagestring will be returned.
Defaults to ``None``.
:param starttime: Start time of the graph as a
:class:`~obspy.core.utcdatetime.UTCDateTime` object. If not set
the graph will be plotted from the beginning.
Defaults to ``None``.
:param endtime: End time of the graph as a
:class:`~obspy.core.utcdatetime.UTCDateTime` object. If not set
the graph will be plotted until the end.
Defaults to ``None``.
:param fig: Use an existing matplotlib figure instance.
Defaults to ``None``.
:param automerge: If automerge is True, Traces with the same id will be
merged.
Defaults to ``True``.
:param size: Size tuple in pixel for the output file. This corresponds
to the resolution of the graph for vector formats.
Defaults to ``(800, 250)`` pixel per channel for ``type='normal'``
or ``type='relative'``, ``(800, 600)`` for ``type='dayplot'``, and
``(1000, 600)`` for ``type='section'``.
:param dpi: Dots per inch of the output file. This also affects the
size of most elements in the graph (text, linewidth, ...).
Defaults to ``100``.
:param color: Color of the graph as a matplotlib color string as
described below. If ``type='dayplot'`` a list/tuple of color
strings is expected that will be periodically repeated for each
line plotted.
Defaults to ``'black'`` or to ``('#B2000F', '#004C12', '#847200',
'#0E01FF')`` for ``type='dayplot'``.
:param bgcolor: Background color of the graph.
Defaults to ``'white'``.
:param face_color: Face color of the matplotlib canvas.
Defaults to ``'white'``.
:param transparent: Make all backgrounds transparent (True/False). This
will override the ``bgcolor`` and ``face_color`` arguments.
Defaults to ``False``.
:param number_of_ticks: The number of ticks on the x-axis.
Defaults to ``4``.
:param tick_format: The way the time axis is formatted.
Defaults to ``'%H:%M:%S'`` or ``'%.2f'`` if ``type='relative'``.
:param tick_rotation: Tick rotation in degrees.
Defaults to ``0``.
:param handle: Whether or not to return the matplotlib figure instance
after the plot has been created.
Defaults to ``False``.
:param method: By default, all traces with more than 400,000 samples
will be plotted with a fast method that cannot be zoomed.
Setting this argument to ``'full'`` will straight up plot the data.
This results in a potentially worse performance but the interactive
matplotlib view can be used properly.
Defaults to 'fast'.
:param type: Type may be set to either ``'dayplot'`` in order to create
a one-day plot for a single Trace or ``'relative'`` to convert all
date/time information to a relative scale, effectively starting
the seismogram at 0 seconds. ``'normal'`` will produce a standard
plot.
Defaults to ``'normal'``.
:param equal_scale: If enabled all plots are equally scaled.
Defaults to ``True``.
:param show: If True, show the plot interactively after plotting. This
is ignored if any of ``outfile``, ``format``, ``handle``, or
``fig`` are specified.
Defaults to ``True``.
:param draw: If True, the figure canvas is explicitly re-drawn, which
ensures that *existing* figures are fresh. It makes no difference
for figures that are not yet visible.
Defaults to ``True``.
:param block: If True block call to showing plot. Only works if the
active matplotlib backend supports it.
Defaults to ``True``.
:param linewidth: Float value in points of the line width.
Defaults to ``1.0``.
:param linestyle: Line style.
Defaults to ``'-'``
:param grid_color: Color of the grid.
Defaults to ``'black'``.
:param grid_linewidth: Float value in points of the grid line width.
Defaults to ``0.5``.
:param grid_linestyle: Grid line style.
Defaults to ``':'``
**Dayplot Parameters**
The following parameters are only available if ``type='dayplot'`` is
set.
:param vertical_scaling_range: Determines how each line is scaled in
its given space. Every line will be centered around its mean value
and then clamped to fit its given space. This argument is the range
in data units that will be used to clamp the data. If the range is
smaller than the actual range, the lines' data may overshoot to
other lines which is usually a desired effect. Larger ranges will
result in a vertical padding.
If ``0``, the actual range of the data will be used and no
overshooting or additional padding will occur.
If ``None`` the range will be chosen to be the 99.5-percentile of
the actual range - so some values will overshoot.
Defaults to ``None``.
:param interval: This defines the interval length in minutes for one
line.
Defaults to ``15``.
:param time_offset: Only used if ``type='dayplot'``. The difference
between the timezone of the data (specified with the kwarg
``timezone``) and UTC time in hours. Will be displayed in a string.
Defaults to the current offset of the system time to UTC time.
:param timezone: Defines the name of the user defined time scale. Will
be displayed in a string together with the actual offset defined in
the kwarg ``time_offset``.
Defaults to ``'local time'``.
:param localization_dict: Enables limited localization of the dayplot
through the usage of a dictionary. To change the labels to, e.g.
German, use the following::
localization_dict={'time in': 'Zeit in', 'seconds': 'Sekunden',
'minutes': 'Minuten', 'hours': 'Stunden'}
:param data_unit: If given, the scale of the data will be drawn on the
right hand side in the form ``"%f {data_unit}"``. The unit is
supposed to be a string containing the actual unit of the data. Can
be a LaTeX expression if matplotlib has been built with LaTeX
support, e.g., ``"$\\\\frac{m}{s}$"``. Be careful to escape the
backslashes, or use r-prefixed strings, e.g.,
``r"$\\\\frac{m}{s}$"``.
Defaults to ``None``, meaning no scale is drawn.
:param events: An optional list of events can be drawn on the plot if
given. They will be displayed as yellow stars with optional
annotations. They are given as a list of dictionaries. Each
dictionary at least needs to have a "time" key, containing a
UTCDateTime object with the origin time of the event. Furthermore
every event can have an optional "text" key which will then be
displayed as an annotation.
Example::
events=[{"time": UTCDateTime(...), "text": "Event A"}, {...}]
It can also be a :class:`~obspy.core.event.Catalog` object. In this
case each event will be annotated with its corresponding
Flinn-Engdahl region and the magnitude.
Events can also be automatically downloaded with the help of
obspy.neries. Just pass a dictionary with a "min_magnitude" key,
e.g. ::
events={"min_magnitude": 5.5}
Defaults to ``[]``.
:param x_labels_size: Size of x labels in points or fontsize.
Defaults to ``8``.
:param y_labels_size: Size of y labels in points or fontsize.
Defaults to ``8``.
:param title_size: Size of the title in points or fontsize.
Defaults to ``10``.
:param subplots_adjust_left: The left side of the subplots of the
figure in fraction of the figure width.
Defaults to ``0.12``.
:param subplots_adjust_right: The right side of the subplots of the
figure in fraction of the figure width.
Defaults to ``0.88``.
:param subplots_adjust_top: The top side of the subplots of the figure
in fraction of the figure width.
Defaults to ``0.95``.
:param subplots_adjust_bottom: The bottom side of the subplots of the
figure in fraction of the figure width.
Defaults to ``0.1``.
:param right_vertical_labels: Whether or not to display labels on the
right side of the dayplot.
Defaults to ``False``.
:param one_tick_per_line: Whether or not to display one tick per line.
Defaults to ``False``.
:param show_y_UTC_label: Whether or not to display the Y UTC vertical
label.
Defaults to ``True``.
:param title: The title to display on top of the plot.
Defaults to ``self.stream[0].id``.
**Section Parameters**
These parameters are only available if ``type='section'`` is set. To
plot a record section the ObsPy header ``trace.stats.distance`` must be
defined in meters (Default). Or ``trace.stats.coordinates.latitude`` &
``trace.stats.coordinates.longitude`` must be set if plotted in
azimuthal distances (``dist_degree=True``) along with ``ev_coord``.
:type scale: float, optional
:param scale: Scale the traces width with this factor.
Defaults to ``1.0``.
:type vred: float, optional
:param vred: Perform velocity reduction, in m/s.
:type norm_method: str, optional
:param norm_method: Defines how the traces are normalized, either
against each ``trace`` or against the global maximum ``stream``.
Defaults to ``trace``.
:type offset_min: float or None, optional
:param offset_min: Minimum offset in meters to plot.
Defaults to minimum offset of all traces.
:type offset_max: float or None, optional
:param offset_max: Maximum offset in meters to plot.
Defaults to maximum offset of all traces.
:type dist_degree: bool, optional
:param dist_degree: Plot trace distance in degree from epicenter. If
``True``, parameter ``ev_coord`` has to be defined.
Defaults to ``False``.
:type ev_coord: tuple or None, optional
:param ev_coord: Event's coordinates as tuple
``(latitude, longitude)``.
:type plot_dx: int, optional
:param plot_dx: Spacing of ticks on the spatial x-axis.
Either km or degree, depending on ``dist_degree``.
:type recordstart: int or float, optional
:param recordstart: Seconds to crop from the beginning.
:type recordlength: int or float, optional
:param recordlength: Length of the record section in seconds.
:type alpha: float, optional
:param alpha: Transparency of the traces between 0.0 - 1.0.
Defaults to ``0.5``.
:type time_down: bool, optional
:param time_down: Flip the plot horizontally, time goes down.
Defaults to ``False``, i.e., time goes up.
**Relative Parameters**
The following parameters are only available if ``type='relative'`` is
set.
:type reftime: :class:`~obspy.core.utcdatetime.UTCDateTime`, optional
:param reftime: The reference time to which the relative scale will
refer.
Defaults to ``starttime``.
.. rubric:: Color Options
Colors can be specified as defined in the :mod:`matplotlib.colors`
documentation.
Short Version: For all color values, you can either use:
* legal `HTML color names <http://www.w3.org/TR/css3-color/#html4>`_,
e.g. ``'blue'``,
* HTML hex strings, e.g. ``'#EE00FF'``,
* pass an string of a R, G, B tuple, where each of the components is a
float value in the range of 0 to 1, e.g. ``'(1, 0.25, 0.5)'``, or
* use single letters for the basic built-in colors, such as ``'b'``
(blue), ``'g'`` (green), ``'r'`` (red), ``'c'`` (cyan), ``'m'``
(magenta), ``'y'`` (yellow), ``'k'`` (black), ``'w'`` (white).
.. rubric:: Example
>>> from obspy import read
>>> st = read()
>>> st.plot() # doctest: +SKIP
.. plot::
from obspy import read
st = read()
st.plot()
"""
from obspy.imaging.waveform import WaveformPlotting
waveform = WaveformPlotting(stream=self, *args, **kwargs)
return waveform.plotWaveform(*args, **kwargs)
|
def plot(self, *args, **kwargs):
"""
Creates a waveform plot of the current ObsPy Stream object.
:param outfile: Output file string. Also used to automatically
determine the output format. Supported file formats depend on your
matplotlib backend. Most backends support png, pdf, ps, eps and
svg. Defaults to ``None``.
:param format: Format of the graph picture. If no format is given the
outfile parameter will be used to try to automatically determine
the output format. If no format is found it defaults to png output.
If no outfile is specified but a format is, than a binary
imagestring will be returned.
Defaults to ``None``.
:param starttime: Start time of the graph as a
:class:`~obspy.core.utcdatetime.UTCDateTime` object. If not set
the graph will be plotted from the beginning.
Defaults to ``None``.
:param endtime: End time of the graph as a
:class:`~obspy.core.utcdatetime.UTCDateTime` object. If not set
the graph will be plotted until the end.
Defaults to ``None``.
:param fig: Use an existing matplotlib figure instance.
Defaults to ``None``.
:param automerge: If automerge is True, Traces with the same id will be
merged.
Defaults to ``True``.
:param size: Size tuple in pixel for the output file. This corresponds
to the resolution of the graph for vector formats.
Defaults to ``(800, 250)`` pixel per channel for ``type='normal'``
or ``type='relative'``, ``(800, 600)`` for ``type='dayplot'``, and
``(1000, 600)`` for ``type='section'``.
:param dpi: Dots per inch of the output file. This also affects the
size of most elements in the graph (text, linewidth, ...).
Defaults to ``100``.
:param color: Color of the graph as a matplotlib color string as
described below. If ``type='dayplot'`` a list/tuple of color
strings is expected that will be periodically repeated for each
line plotted.
Defaults to ``'black'`` or to ``('#B2000F', '#004C12', '#847200',
'#0E01FF')`` for ``type='dayplot'``.
:param bgcolor: Background color of the graph.
Defaults to ``'white'``.
:param face_color: Face color of the matplotlib canvas.
Defaults to ``'white'``.
:param transparent: Make all backgrounds transparent (True/False). This
will override the ``bgcolor`` and ``face_color`` arguments.
Defaults to ``False``.
:param number_of_ticks: The number of ticks on the x-axis.
Defaults to ``4``.
:param tick_format: The way the time axis is formatted.
Defaults to ``'%H:%M:%S'`` or ``'%.2f'`` if ``type='relative'``.
:param tick_rotation: Tick rotation in degrees.
Defaults to ``0``.
:param handle: Whether or not to return the matplotlib figure instance
after the plot has been created.
Defaults to ``False``.
:param method: By default, all traces with more than 400,000 samples
will be plotted with a fast method that cannot be zoomed.
Setting this argument to ``'full'`` will straight up plot the data.
This results in a potentially worse performance but the interactive
matplotlib view can be used properly.
Defaults to 'fast'.
:param type: Type may be set to either ``'dayplot'`` in order to create
a one-day plot for a single Trace or ``'relative'`` to convert all
date/time information to a relative scale, effectively starting
the seismogram at 0 seconds. ``'normal'`` will produce a standard
plot.
Defaults to ``'normal'``.
:param equal_scale: Is enabled all plots are equally scaled.
Defaults to ``True``.
:param block: If True block call to showing plot. Only works if the
active matplotlib backend supports it.
Defaults to ``True``.
:param linewidth: Float value in points of the line width.
Defaults to ``1.0``.
:param linestyle: Line style.
Defaults to ``'-'``
:param grid_color: Color of the grid.
Defaults to ``'black'``.
:param grid_linewidth: Float value in points of the grid line width.
Defaults to ``0.5``.
:param grid_linestyle: Grid line style.
Defaults to ``':'``
**Dayplot Parameters**
The following parameters are only available if ``type='dayplot'`` is
set.
:param vertical_scaling_range: Determines how each line is scaled in
its given space. Every line will be centered around its mean value
and then clamped to fit its given space. This argument is the range
in data units that will be used to clamp the data. If the range is
smaller than the actual range, the lines' data may overshoot to
other lines which is usually a desired effect. Larger ranges will
result in a vertical padding.
If ``0``, the actual range of the data will be used and no
overshooting or additional padding will occur.
If ``None`` the range will be chosen to be the 99.5-percentile of
the actual range - so some values will overshoot.
Defaults to ``None``.
:param interval: This defines the interval length in minutes for one
line.
Defaults to ``15``.
:param time_offset: Only used if ``type='dayplot'``. The difference
between the timezone of the data (specified with the kwarg
``timezone``) and UTC time in hours. Will be displayed in a string.
Defaults to the current offset of the system time to UTC time.
:param timezone: Defines the name of the user defined time scale. Will
be displayed in a string together with the actual offset defined in
the kwarg ``time_offset``.
Defaults to ``'local time'``.
:param localization_dict: Enables limited localization of the dayplot
through the usage of a dictionary. To change the labels to, e.g.
German, use the following::
localization_dict={'time in': 'Zeit in', 'seconds': 'Sekunden',
'minutes': 'Minuten', 'hours': 'Stunden'}
:param data_unit: If given, the scale of the data will be drawn on the
right hand side in the form ``"%f {data_unit}"``. The unit is
supposed to be a string containing the actual unit of the data. Can
be a LaTeX expression if matplotlib has been built with LaTeX
support, e.g., ``"$\\\\frac{m}{s}$"``. Be careful to escape the
backslashes, or use r-prefixed strings, e.g.,
``r"$\\\\frac{m}{s}$"``.
Defaults to ``None``, meaning no scale is drawn.
:param events: An optional list of events can be drawn on the plot if
given. They will be displayed as yellow stars with optional
annotations. They are given as a list of dictionaries. Each
dictionary at least needs to have a "time" key, containing a
UTCDateTime object with the origin time of the event. Furthermore
every event can have an optional "text" key which will then be
displayed as an annotation.
Example::
events=[{"time": UTCDateTime(...), "text": "Event A"}, {...}]
It can also be a :class:`~obspy.core.event.Catalog` object. In this
case each event will be annotated with its corresponding
Flinn-Engdahl region and the magnitude.
Events can also be automatically downloaded with the help of
obspy.neries. Just pass a dictionary with a "min_magnitude" key,
e.g. ::
events={"min_magnitude": 5.5}
Defaults to ``[]``.
:param x_labels_size: Size of x labels in points or fontsize.
Defaults to ``8``.
:param y_labels_size: Size of y labels in points or fontsize.
Defaults to ``8``.
:param title_size: Size of the title in points or fontsize.
Defaults to ``10``.
:param subplots_adjust_left: The left side of the subplots of the
figure in fraction of the figure width.
Defaults to ``0.12``.
:param subplots_adjust_right: The right side of the subplots of the
figure in fraction of the figure width.
Defaults to ``0.88``.
:param subplots_adjust_top: The top side of the subplots of the figure
in fraction of the figure width.
Defaults to ``0.95``.
:param subplots_adjust_bottom: The bottom side of the subplots of the
figure in fraction of the figure width.
Defaults to ``0.1``.
:param right_vertical_labels: Whether or not to display labels on the
right side of the dayplot.
Defaults to ``False``.
:param one_tick_per_line: Whether or not to display one tick per line.
Defaults to ``False``.
:param show_y_UTC_label: Whether or not to display the Y UTC vertical
label.
Defaults to ``True``.
:param title: The title to display on top of the plot.
Defaults to ``self.stream[0].id``.
**Section Parameters**
These parameters are only available if ``type='section'`` is set. To
plot a record section the ObsPy header ``trace.stats.distance`` must be
defined in meters (Default). Or ``trace.stats.coordinates.latitude`` &
``trace.stats.coordinates.longitude`` must be set if plotted in
azimuthal distances (``dist_degree=True``) along with ``ev_coord``.
:type scale: float, optional
:param scale: Scale the traces width with this factor.
Defaults to ``1.0``.
:type vred: float, optional
:param vred: Perform velocity reduction, in m/s.
:type norm: str, optional
:param norm: Defines how the traces are normalized,
either against each ``trace`` or against the global
maximum ``stream``.
Defaults to ``trace``.
:type offset_min: float or None, optional
:param offset_min: Minimum offset in meters to plot.
Defaults to minimum offset of all traces.
:type offset_max: float or None, optional
:param offset_min: Maximum offset in meters to plot.
Defaults to maximum offset of all traces.
:param dist_degree: Plot trace distance in degree from epicenter. If
``True``, parameter ``ev_coord`` has to be defined.
Defaults to ``False``.
:type ev_coord: tuple or None, optional
:param ev_coord: Event's coordinates as tuple
``(latitude, longitude)``.
:type plot_dx: int, optional
:param plot_dx: Spacing of ticks on the spatial x-axis.
Either km or degree, depending on ``dist_degree``.
:type recordstart: int, optional
:param recordstart: Seconds to crop from the beginning.
:type recordlength: int, optional
:param recordlength: Length of the record section in seconds.
:type alpha: float, optional
:param alpha: Transparency of the traces between 0.0 - 1.0.
Defaults to ``0.5``.
:type time_down: bool, optional
:param time_down: Flip the plot horizontally, time goes down.
Defaults to ``False``, i.e., time goes up.
**Relative Parameters**
The following parameters are only available if ``type='relative'`` is
set.
:type reftime: :class:`~obspy.core.utcdatetime.UTCDateTime`, optional
:param reftime: The reference time to which the relative scale will
refer.
Defaults to ``starttime``.
.. rubric:: Color Options
Colors can be specified as defined in the :mod:`matplotlib.colors`
documentation.
Short Version: For all color values, you can either use:
* legal `HTML color names <http://www.w3.org/TR/css3-color/#html4>`_,
e.g. ``'blue'``,
* HTML hex strings, e.g. ``'#EE00FF'``,
* pass an string of a R, G, B tuple, where each of the components is a
float value in the range of 0 to 1, e.g. ``'(1, 0.25, 0.5)'``, or
* use single letters for the basic built-in colors, such as ``'b'``
(blue), ``'g'`` (green), ``'r'`` (red), ``'c'`` (cyan), ``'m'``
(magenta), ``'y'`` (yellow), ``'k'`` (black), ``'w'`` (white).
.. rubric:: Example
>>> from obspy import read
>>> st = read()
>>> st.plot() # doctest: +SKIP
.. plot::
from obspy import read
st = read()
st.plot()
"""
from obspy.imaging.waveform import WaveformPlotting
waveform = WaveformPlotting(stream=self, *args, **kwargs)
return waveform.plotWaveform(*args, **kwargs)
|
https://github.com/obspy/obspy/issues/913
|
>>> >>> >>> Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/kasper/Downloads/waveform_plotting_tutorial_6.py", line 32, in <module>
time_down=True, linewidth=.25, grid_linewidth=.25)
File "/usr/lib/python2.7/site-packages/obspy-0.9.2-py2.7-linux-x86_64.egg/obspy/core/stream.py", line 1058, in plot
return waveform.plotWaveform(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/obspy-0.9.2-py2.7-linux-x86_64.egg/obspy/imaging/waveform.py", line 253, in plotWaveform
self.plotSection(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/obspy-0.9.2-py2.7-linux-x86_64.egg/obspy/imaging/waveform.py", line 1114, in plotSection
self.__sectInitTraces()
File "/usr/lib/python2.7/site-packages/obspy-0.9.2-py2.7-linux-x86_64.egg/obspy/imaging/waveform.py", line 1229, in __sectInitTraces
self.stream[_tr].data, self.max_npts)
File "/usr/lib64/python2.7/site-packages/scipy/signal/signaltools.py", line 1292, in resample
X = fft(x, axis=axis)
File "/usr/lib64/python2.7/site-packages/scipy/fftpack/basic.py", line 222, in fft
raise ValueError("type %s is not supported" % tmp.dtype)
ValueError: type >f4 is not supported
|
ValueError
|
def instBwith(data, fs, fk):
"""
Instantaneous bandwidth of a signal.
Computes the instantaneous bandwidth of the given data which can be
windowed or not. The instantaneous bandwidth is determined by the time
derivative of the envelope normalized by the envelope of the input data.
:type data: :class:`~numpy.ndarray`
:param data: Data to determine instantaneous bandwidth of.
:param fs: Sampling frequency.
:param fk: Filter coefficients for computing time derivative.
:return: **sigma[, dsigma]** - Instantaneous bandwidth of input data, Time
derivative of instantaneous bandwidth (windowed only).
"""
x = envelope(data)
if size(x[1].shape) > 1:
sigma = np.zeros(x[1].shape[0], dtype=np.float64)
i = 0
for row in x[1]:
# faster alternative to calculate A_win_add
A_win_add = np.hstack(
(
[row[0]] * (np.size(fk) // 2),
row,
[row[np.size(row) - 1]] * (np.size(fk) // 2),
)
)
t = signal.lfilter(fk, 1, A_win_add)
# t = t[size(fk) // 2:(size(t) - size(fk) // 2)]
# correct start and end values
t = t[size(fk) - 1 : size(t)]
sigma_win = abs((t * fs) / (row * 2 * pi))
sigma[i] = np.median(sigma_win)
i = i + 1
# faster alternative to calculate sigma_add
sigma_add = np.hstack(
(
[sigma[0]] * (np.size(fk) // 2),
sigma,
[sigma[np.size(sigma) - 1]] * (np.size(fk) // 2),
)
)
dsigma = signal.lfilter(fk, 1, sigma_add)
# dsigma = dsigma[size(fk) // 2:(size(dsigma) - size(fk) // 2)]
# correct start and end values
dsigma = dsigma[size(fk) - 1 : size(dsigma)]
return sigma, dsigma
else:
row = x[1]
sigma = np.zeros(size(x[0]), dtype=np.float64)
# faster alternative to calculate A_win_add
A_win_add = np.hstack(
(
[row[0]] * (np.size(fk) // 2),
row,
[row[np.size(row) - 1]] * (np.size(fk) // 2),
)
)
t = signal.lfilter(fk, 1, A_win_add)
# correct start and end values
t = t[size(fk) - 1 : size(t)]
sigma = abs((t * fs) / (x[1] * 2 * pi))
return sigma
|
def instBwith(data, fs, fk):
"""
Instantaneous bandwidth of a signal.
Computes the instantaneous bandwidth of the given data which can be
windowed or not. The instantaneous bandwidth is determined by the time
derivative of the envelope normalized by the envelope of the input data.
:type data: :class:`~numpy.ndarray`
:param data: Data to determine instantaneous bandwidth of.
:param fs: Sampling frequency.
:param fk: Filter coefficients for computing time derivative.
:return: **sigma[, dsigma]** - Instantaneous bandwidth of input data, Time
derivative of instantaneous bandwidth (windowed only).
"""
x = envelope(data)
if size(x[1].shape) > 1:
sigma = np.zeros(x[1].shape[0], dtype=np.float64)
i = 0
for row in x[1]:
# faster alternative to calculate A_win_add
A_win_add = np.hstack(
(
[row[0]] * (np.size(fk) // 2),
row,
[row[np.size(row) - 1]] * (np.size(fk) // 2),
)
)
t = signal.lfilter(fk, 1, A_win_add)
# t = t[size(fk) // 2:(size(t) - size(fk) // 2)]
# correct start and end values
t = t[size(fk) - 1 : size(t)]
sigma_win = abs((t * fs) / (row * 2 * pi))
sigma[i] = np.median(sigma_win)
i = i + 1
# faster alternative to calculate sigma_add
sigma_add = np.hstack(
(
[sigma[0]] * (np.size(fk) // 2),
sigma,
[sigma[np.size(sigma) - 1]] * (np.size(fk) // 2),
)
)
dsigma = signal.lfilter(fk, 1, sigma_add)
# dsigma = dsigma[size(fk) // 2:(size(dsigma) - size(fk) // 2)]
# correct start and end values
dsigma = dsigma[size(fk) - 1 : size(dsigma)]
return sigma, dsigma
else:
sigma = np.zeros(size(x[0]), dtype=np.float64)
# faster alternative to calculate A_win_add
A_win_add = np.hstack(
(
[row[0]] * (np.size(fk) // 2),
row,
[row[np.size(row) - 1]] * (np.size(fk) // 2),
)
)
t = signal.lfilter(fk, 1, A_win_add)
# correct start and end values
t = t[size(fk) - 1 : size(t)]
sigma = abs((t * fs) / (x[1] * 2 * pi))
return sigma
|
https://github.com/obspy/obspy/issues/903
|
In [1]: from obspy.signal import envelope, instBwith
In [2]: from obspy import read
In [3]: tr = read()[0]
In [4]: import matplotlib.pyplot as plt
In [5]: plt.figure(); plt.plot(instBwith(tr.data, 100, (-1.0, 0.0, 1.0)))
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
<ipython-input-5-5f89053a38f3> in <module>()
----> 1 plt.figure(); plt.plot(instBwith(tr.data, 100, (-1.0, 0.0, 1.0)))
/Users/jkmacc/anaconda/lib/python2.7/site-packages/obspy/signal/cpxtrace.pyc in instBwith(data, fs, fk)
303 # faster alternative to calculate A_win_add
304 A_win_add = np.hstack(
--> 305 ([row[0]] * (np.size(fk) // 2), row,
306 [row[np.size(row) - 1]] * (np.size(fk) // 2)))
307 t = signal.lfilter(fk, 1, A_win_add)
UnboundLocalError: local variable 'row' referenced before assignment
|
UnboundLocalError
|
def _getDateTime(self):
"""
Returns a Python datetime object.
:rtype: :class:`datetime.datetime`
:return: Python datetime object.
.. rubric:: Example
>>> dt = UTCDateTime(2008, 10, 1, 12, 30, 35, 45020)
>>> dt.datetime
datetime.datetime(2008, 10, 1, 12, 30, 35, 45020)
"""
# datetime.utcfromtimestamp will cut off but not round
# avoid through adding timedelta - also avoids 2038 problem
return datetime.datetime.utcfromtimestamp(0) + datetime.timedelta(
seconds=self.timestamp
)
|
def _getDateTime(self):
"""
Returns a Python datetime object.
:rtype: :class:`datetime.datetime`
:return: Python datetime object.
.. rubric:: Example
>>> dt = UTCDateTime(2008, 10, 1, 12, 30, 35, 45020)
>>> dt.datetime
datetime.datetime(2008, 10, 1, 12, 30, 35, 45020)
"""
# we are exact at the border of floating point precision
# datetime.utcfromtimestamp will cut off but not round
# avoid through adding extra timedelta
_fsec, _isec = math.modf(self.timestamp)
return datetime.datetime.utcfromtimestamp(_isec) + datetime.timedelta(seconds=_fsec)
|
https://github.com/obspy/obspy/issues/805
|
Python 2.7.6 (default, Mar 22 2014, 22:59:38)
[GCC 4.8.2] on linux2
Type "help", "copyright", "credits" or "license" for more information.
from obspy.core import UTCDateTime
t = UTCDateTime("2014-05-23T22:35:30")
print t
2014-05-23T22:35:30.000000Z
t = UTCDateTime("2599-05-23T22:35:30")
print t
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python2.7/dist-packages/obspy/core/utcdatetime.py", line 895, in __str__
return "%s%sZ" % (self.strftime('%Y-%m-%dT%H:%M:%S'),
File "/usr/lib/python2.7/dist-packages/obspy/core/utcdatetime.py", line 1126, in strftime
return self._getDateTime().strftime(format)
File "/usr/lib/python2.7/dist-packages/obspy/core/utcdatetime.py", line 492, in _getDateTime
return datetime.datetime.utcfromtimestamp(self.timestamp)
ValueError: timestamp out of range for platform time_t
|
ValueError
|
def _getDateTime(self):
"""
Returns a Python datetime object.
:rtype: :class:`datetime.datetime`
:return: Python datetime object.
.. rubric:: Example
>>> dt = UTCDateTime(2008, 10, 1, 12, 30, 35, 45020)
>>> dt.datetime
datetime.datetime(2008, 10, 1, 12, 30, 35, 45020)
"""
# datetime.utcfromtimestamp will cut off but not round
# avoid through adding timedelta - also avoids the year 2038 problem
return datetime.datetime.utcfromtimestamp(0) + datetime.timedelta(
seconds=self.timestamp
)
|
def _getDateTime(self):
"""
Returns a Python datetime object.
:rtype: :class:`datetime.datetime`
:return: Python datetime object.
.. rubric:: Example
>>> dt = UTCDateTime(2008, 10, 1, 12, 30, 35, 45020)
>>> dt.datetime
datetime.datetime(2008, 10, 1, 12, 30, 35, 45020)
"""
# datetime.utcfromtimestamp will cut off but not round
# avoid through adding timedelta - also avoids 2038 problem
return datetime.datetime.utcfromtimestamp(0) + datetime.timedelta(
seconds=self.timestamp
)
|
https://github.com/obspy/obspy/issues/805
|
Python 2.7.6 (default, Mar 22 2014, 22:59:38)
[GCC 4.8.2] on linux2
Type "help", "copyright", "credits" or "license" for more information.
from obspy.core import UTCDateTime
t = UTCDateTime("2014-05-23T22:35:30")
print t
2014-05-23T22:35:30.000000Z
t = UTCDateTime("2599-05-23T22:35:30")
print t
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python2.7/dist-packages/obspy/core/utcdatetime.py", line 895, in __str__
return "%s%sZ" % (self.strftime('%Y-%m-%dT%H:%M:%S'),
File "/usr/lib/python2.7/dist-packages/obspy/core/utcdatetime.py", line 1126, in strftime
return self._getDateTime().strftime(format)
File "/usr/lib/python2.7/dist-packages/obspy/core/utcdatetime.py", line 492, in _getDateTime
return datetime.datetime.utcfromtimestamp(self.timestamp)
ValueError: timestamp out of range for platform time_t
|
ValueError
|
def _getDateTime(self):
"""
Returns a Python datetime object.
:rtype: :class:`datetime.datetime`
:return: Python datetime object.
.. rubric:: Example
>>> dt = UTCDateTime(2008, 10, 1, 12, 30, 35, 45020)
>>> dt.datetime
datetime.datetime(2008, 10, 1, 12, 30, 35, 45020)
"""
# datetime.utcfromtimestamp will cut off but not round
# avoid through adding timedelta - also avoids 2038 problem
return datetime.datetime.utcfromtimestamp(0) + datetime.timedelta(
seconds=self.timestamp
)
|
def _getDateTime(self):
"""
Returns a Python datetime object.
:rtype: :class:`datetime.datetime`
:return: Python datetime object.
.. rubric:: Example
>>> dt = UTCDateTime(2008, 10, 1, 12, 30, 35, 45020)
>>> dt.datetime
datetime.datetime(2008, 10, 1, 12, 30, 35, 45020)
"""
return datetime.datetime.utcfromtimestamp(self.timestamp)
|
https://github.com/obspy/obspy/issues/805
|
Python 2.7.6 (default, Mar 22 2014, 22:59:38)
[GCC 4.8.2] on linux2
Type "help", "copyright", "credits" or "license" for more information.
from obspy.core import UTCDateTime
t = UTCDateTime("2014-05-23T22:35:30")
print t
2014-05-23T22:35:30.000000Z
t = UTCDateTime("2599-05-23T22:35:30")
print t
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python2.7/dist-packages/obspy/core/utcdatetime.py", line 895, in __str__
return "%s%sZ" % (self.strftime('%Y-%m-%dT%H:%M:%S'),
File "/usr/lib/python2.7/dist-packages/obspy/core/utcdatetime.py", line 1126, in strftime
return self._getDateTime().strftime(format)
File "/usr/lib/python2.7/dist-packages/obspy/core/utcdatetime.py", line 492, in _getDateTime
return datetime.datetime.utcfromtimestamp(self.timestamp)
ValueError: timestamp out of range for platform time_t
|
ValueError
|
def _get_lib_name(lib, add_extension_suffix):
"""
Helper function to get an architecture and Python version specific library
filename.
:type add_extension_suffix: bool
:param add_extension_suffix: Numpy distutils adds a suffix to
the filename we specify to build internally (as specified by Python
builtin `sysconfig.get_config_var("EXT_SUFFIX")`. So when loading the
file we have to add this suffix, but not during building.
"""
# our custom defined part of the extension filename
libname = "lib%s_%s_%s_py%s" % (
lib,
platform.system(),
platform.architecture()[0],
"".join([str(i) for i in platform.python_version_tuple()[:2]]),
)
libname = cleanse_pymodule_filename(libname)
# numpy distutils adds extension suffix by itself during build (#771, #755)
if add_extension_suffix:
# append any extension suffix defined by Python for current platform,
# but strip ".so"
ext_suffix = sysconfig.get_config_var("EXT_SUFFIX")
if ext_suffix:
if ext_suffix.endswith(".so"):
ext_suffix = ext_suffix[:-3]
libname = libname + ext_suffix
return libname
|
def _get_lib_name(lib, during_build):
"""
Helper function to get an architecture and Python version specific library
filename.
:type during_build: bool
:param during_build: Specifies whether the library name is requested during
building ObsPy or inside ObsPy code. Numpy distutils adds a suffix to
the filename we specify to build (as specified by Python builtin
`sysconfig.get_config_var("EXT_SUFFIX")`. So when loading the file we
have to add this suffix.
"""
# our custom defined part of the extension filename
libname = "lib%s_%s_%s_py%s" % (
lib,
platform.system(),
platform.architecture()[0],
"".join([str(i) for i in platform.python_version_tuple()[:2]]),
)
libname = cleanse_pymodule_filename(libname)
# numpy distutils adds extension suffix by itself during build (#771, #755)
if not during_build:
# append any extension suffix defined by Python for current platform,
# but strip ".so"
ext_suffix = sysconfig.get_config_var("EXT_SUFFIX")
if ext_suffix:
if ext_suffix.endswith(".so"):
ext_suffix = ext_suffix[:-3]
libname = libname + ext_suffix
return libname
|
https://github.com/obspy/obspy/issues/771
|
$ python3 -c "import obspy.mseed"
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "./obspy/__init__.py", line 43, in <module>
read.__doc__ % make_format_plugin_table("waveform", "read", numspaces=4)
File "./obspy/core/util/base.py", line 394, in make_format_plugin_table
"obspy.plugin.%s.%s" % (group, name), method)
File "/home/vagrant/python3/lib/python3.3/site-packages/setuptools-3.4.3-py3.3.egg/pkg_resources.py", line 351, in load_entry_point
File "/home/vagrant/python3/lib/python3.3/site-packages/setuptools-3.4.3-py3.3.egg/pkg_resources.py", line 2363, in load_entry_point
File "/home/vagrant/python3/lib/python3.3/site-packages/setuptools-3.4.3-py3.3.egg/pkg_resources.py", line 2088, in load
File "./obspy/mseed/core.py", line 15, in <module>
from obspy.mseed.headers import clibmseed, ENCODINGS, HPTMODULUS, \
File "./obspy/mseed/headers.py", line 39, in <module>
raise ImportError(msg)
ImportError: Could not load shared library for obspy.mseed.
./obspy/mseed/../lib/libmseed.so: cannot open shared object file: No such file or directory
|
ImportError
|
def configuration(parent_package="", top_path=None):
"""
Config function mainly used to compile C and Fortran code.
"""
config = Configuration("", parent_package, top_path)
# GSE2
path = os.path.join(SETUP_DIRECTORY, "obspy", "gse2", "src", "GSE_UTI")
files = [os.path.join(path, "gse_functions.c")]
# compiler specific options
kwargs = {}
if IS_MSVC:
# get export symbols
kwargs["export_symbols"] = export_symbols(path, "gse_functions.def")
config.add_extension(
_get_lib_name("gse2", add_extension_suffix=False), files, **kwargs
)
# LIBMSEED
path = os.path.join(SETUP_DIRECTORY, "obspy", "mseed", "src")
files = glob.glob(os.path.join(path, "libmseed", "*.c"))
files.append(os.path.join(path, "obspy-readbuffer.c"))
# compiler specific options
kwargs = {}
if IS_MSVC:
# needed by libmseed lmplatform.h
kwargs["define_macros"] = [("WIN32", "1")]
# get export symbols
kwargs["export_symbols"] = export_symbols(path, "libmseed", "libmseed.def")
kwargs["export_symbols"] += export_symbols(path, "obspy-readbuffer.def")
# workaround Win32 and MSVC - see issue #64
if "32" in platform.architecture()[0]:
kwargs["extra_compile_args"] = ["/fp:strict"]
config.add_extension(
_get_lib_name("mseed", add_extension_suffix=False), files, **kwargs
)
# SEGY
path = os.path.join(SETUP_DIRECTORY, "obspy", "segy", "src")
files = [os.path.join(path, "ibm2ieee.c")]
# compiler specific options
kwargs = {}
if IS_MSVC:
# get export symbols
kwargs["export_symbols"] = export_symbols(path, "libsegy.def")
config.add_extension(
_get_lib_name("segy", add_extension_suffix=False), files, **kwargs
)
# SIGNAL
path = os.path.join(SETUP_DIRECTORY, "obspy", "signal", "src")
files = glob.glob(os.path.join(path, "*.c"))
# compiler specific options
kwargs = {}
if IS_MSVC:
# get export symbols
kwargs["export_symbols"] = export_symbols(path, "libsignal.def")
config.add_extension(
_get_lib_name("signal", add_extension_suffix=False), files, **kwargs
)
# EVALRESP
path = os.path.join(SETUP_DIRECTORY, "obspy", "signal", "src")
files = glob.glob(os.path.join(path, "evalresp", "*.c"))
# compiler specific options
kwargs = {}
if IS_MSVC:
# needed by evalresp evresp.h
kwargs["define_macros"] = [("WIN32", "1")]
# get export symbols
kwargs["export_symbols"] = export_symbols(path, "libevresp.def")
config.add_extension(
_get_lib_name("evresp", add_extension_suffix=False), files, **kwargs
)
# TAUP
path = os.path.join(SETUP_DIRECTORY, "obspy", "taup", "src")
libname = _get_lib_name("tau", add_extension_suffix=False)
files = glob.glob(os.path.join(path, "*.f"))
# compiler specific options
kwargs = {"libraries": []}
# XXX: The build subdirectory is difficult to determine if installed
# via pypi or other means. I could not find a reliable way of doing it.
new_interface_path = os.path.join("build", libname + os.extsep + "pyf")
interface_file = os.path.join(path, "_libtau.pyf")
with open(interface_file, "r") as open_file:
interface_file = open_file.read()
# In the original .pyf file the library is called _libtau.
interface_file = interface_file.replace("_libtau", libname)
if not os.path.exists("build"):
os.mkdir("build")
with open(new_interface_path, "w") as open_file:
open_file.write(interface_file)
files.insert(0, new_interface_path)
# we do not need this when linking with gcc, only when linking with
# gfortran the option -lgcov is required
if os.environ.get("OBSPY_C_COVERAGE", ""):
kwargs["libraries"].append("gcov")
config.add_extension(libname, files, **kwargs)
add_data_files(config)
return config
|
def configuration(parent_package="", top_path=None):
"""
Config function mainly used to compile C and Fortran code.
"""
config = Configuration("", parent_package, top_path)
# GSE2
path = os.path.join(SETUP_DIRECTORY, "obspy", "gse2", "src", "GSE_UTI")
files = [os.path.join(path, "gse_functions.c")]
# compiler specific options
kwargs = {}
if IS_MSVC:
# get export symbols
kwargs["export_symbols"] = export_symbols(path, "gse_functions.def")
config.add_extension(_get_lib_name("gse2", during_build=True), files, **kwargs)
# LIBMSEED
path = os.path.join(SETUP_DIRECTORY, "obspy", "mseed", "src")
files = glob.glob(os.path.join(path, "libmseed", "*.c"))
files.append(os.path.join(path, "obspy-readbuffer.c"))
# compiler specific options
kwargs = {}
if IS_MSVC:
# needed by libmseed lmplatform.h
kwargs["define_macros"] = [("WIN32", "1")]
# get export symbols
kwargs["export_symbols"] = export_symbols(path, "libmseed", "libmseed.def")
kwargs["export_symbols"] += export_symbols(path, "obspy-readbuffer.def")
# workaround Win32 and MSVC - see issue #64
if "32" in platform.architecture()[0]:
kwargs["extra_compile_args"] = ["/fp:strict"]
config.add_extension(_get_lib_name("mseed", during_build=True), files, **kwargs)
# SEGY
path = os.path.join(SETUP_DIRECTORY, "obspy", "segy", "src")
files = [os.path.join(path, "ibm2ieee.c")]
# compiler specific options
kwargs = {}
if IS_MSVC:
# get export symbols
kwargs["export_symbols"] = export_symbols(path, "libsegy.def")
config.add_extension(_get_lib_name("segy", during_build=True), files, **kwargs)
# SIGNAL
path = os.path.join(SETUP_DIRECTORY, "obspy", "signal", "src")
files = glob.glob(os.path.join(path, "*.c"))
# compiler specific options
kwargs = {}
if IS_MSVC:
# get export symbols
kwargs["export_symbols"] = export_symbols(path, "libsignal.def")
config.add_extension(_get_lib_name("signal", during_build=True), files, **kwargs)
# EVALRESP
path = os.path.join(SETUP_DIRECTORY, "obspy", "signal", "src")
files = glob.glob(os.path.join(path, "evalresp", "*.c"))
# compiler specific options
kwargs = {}
if IS_MSVC:
# needed by evalresp evresp.h
kwargs["define_macros"] = [("WIN32", "1")]
# get export symbols
kwargs["export_symbols"] = export_symbols(path, "libevresp.def")
config.add_extension(_get_lib_name("evresp", during_build=True), files, **kwargs)
# TAUP
path = os.path.join(SETUP_DIRECTORY, "obspy", "taup", "src")
libname = _get_lib_name("tau", during_build=True)
files = glob.glob(os.path.join(path, "*.f"))
# compiler specific options
kwargs = {"libraries": []}
# XXX: The build subdirectory is difficult to determine if installed
# via pypi or other means. I could not find a reliable way of doing it.
new_interface_path = os.path.join("build", libname + os.extsep + "pyf")
interface_file = os.path.join(path, "_libtau.pyf")
with open(interface_file, "r") as open_file:
interface_file = open_file.read()
# In the original .pyf file the library is called _libtau.
interface_file = interface_file.replace("_libtau", libname)
if not os.path.exists("build"):
os.mkdir("build")
with open(new_interface_path, "w") as open_file:
open_file.write(interface_file)
files.insert(0, new_interface_path)
# we do not need this when linking with gcc, only when linking with
# gfortran the option -lgcov is required
if os.environ.get("OBSPY_C_COVERAGE", ""):
kwargs["libraries"].append("gcov")
config.add_extension(libname, files, **kwargs)
add_data_files(config)
return config
|
https://github.com/obspy/obspy/issues/771
|
$ python3 -c "import obspy.mseed"
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "./obspy/__init__.py", line 43, in <module>
read.__doc__ % make_format_plugin_table("waveform", "read", numspaces=4)
File "./obspy/core/util/base.py", line 394, in make_format_plugin_table
"obspy.plugin.%s.%s" % (group, name), method)
File "/home/vagrant/python3/lib/python3.3/site-packages/setuptools-3.4.3-py3.3.egg/pkg_resources.py", line 351, in load_entry_point
File "/home/vagrant/python3/lib/python3.3/site-packages/setuptools-3.4.3-py3.3.egg/pkg_resources.py", line 2363, in load_entry_point
File "/home/vagrant/python3/lib/python3.3/site-packages/setuptools-3.4.3-py3.3.egg/pkg_resources.py", line 2088, in load
File "./obspy/mseed/core.py", line 15, in <module>
from obspy.mseed.headers import clibmseed, ENCODINGS, HPTMODULUS, \
File "./obspy/mseed/headers.py", line 39, in <module>
raise ImportError(msg)
ImportError: Could not load shared library for obspy.mseed.
./obspy/mseed/../lib/libmseed.so: cannot open shared object file: No such file or directory
|
ImportError
|
def isQuakeML(filename):
"""
Checks whether a file is QuakeML format.
:type filename: str
:param filename: Name of the QuakeML file to be checked.
:rtype: bool
:return: ``True`` if QuakeML file.
.. rubric:: Example
>>> isQuakeML('/path/to/quakeml.xml') # doctest: +SKIP
True
"""
try:
xml_doc = XMLParser(filename)
except:
return False
# check if node "*/eventParameters/event" for the global namespace exists
try:
namespace = xml_doc._getFirstChildNamespace()
xml_doc.xpath("eventParameters", namespace=namespace)[0]
except:
return False
return True
|
def isQuakeML(filename):
"""
Checks whether a file is QuakeML format.
:type filename: str
:param filename: Name of the QuakeML file to be checked.
:rtype: bool
:return: ``True`` if QuakeML file.
.. rubric:: Example
>>> isQuakeML('/path/to/quakeml.xml') # doctest: +SKIP
True
"""
try:
p = XMLParser(filename)
except:
False
# check node "*/eventParameters/event" for the global namespace exists
try:
namespace = p._getFirstChildNamespace()
p.xpath("eventParameters", namespace=namespace)[0]
except:
return False
return True
|
https://github.com/obspy/obspy/issues/489
|
Traceback (most recent call last):
File "/tmp/testrun/git/obspy/core/tests/test_util_xmlwrapper.py", line 49, in test_init
XMLParser(fh)
File "/tmp/testrun/git/obspy/core/util/xmlwrapper.py", line 73, in __init__
xml_doc.seek(0)
ValueError: I/O operation on closed file
|
ValueError
|
def __init__(self, xml_doc, namespace=None):
"""
Initializes a XMLPaser object.
:type xml_doc: str, filename, file-like object, parsed XML document
:param xml_doc: XML document
:type namespace: str, optional
:param namespace: Document-wide default namespace. Defaults to ``''``.
"""
if isinstance(xml_doc, basestring):
# some string - check if it starts with <?xml
if xml_doc.strip()[0:5].upper().startswith("<?XML"):
xml_doc = StringIO.StringIO(xml_doc)
# parse XML file
self.xml_doc = etree.parse(xml_doc)
elif hasattr(xml_doc, "seek"):
# some file-based content
xml_doc.seek(0)
self.xml_doc = etree.parse(xml_doc)
else:
self.xml_doc = xml_doc
self.xml_root = self.xml_doc.getroot()
self.namespace = namespace or self._getRootNamespace()
|
def __init__(self, xml_doc, namespace=None):
"""
Initializes a XMLPaser object.
:type xml_doc: str, filename, file-like object, parsed XML document
:param xml_doc: XML document
:type namespace: str, optional
:param namespace: Document-wide default namespace. Defaults to ``''``.
"""
if isinstance(xml_doc, basestring):
# some string - check if it starts with <?xml
if xml_doc.strip()[0:5].upper().startswith("<?XML"):
xml_doc = StringIO.StringIO(xml_doc)
# parse XML file
self.xml_doc = etree.parse(xml_doc)
elif hasattr(xml_doc, "seek"):
# some file-based content
self.xml_doc = etree.parse(xml_doc)
else:
self.xml_doc = xml_doc
self.xml_root = self.xml_doc.getroot()
self.namespace = namespace or self._getRootNamespace()
|
https://github.com/obspy/obspy/issues/489
|
Traceback (most recent call last):
File "/tmp/testrun/git/obspy/core/tests/test_util_xmlwrapper.py", line 49, in test_init
XMLParser(fh)
File "/tmp/testrun/git/obspy/core/util/xmlwrapper.py", line 73, in __init__
xml_doc.seek(0)
ValueError: I/O operation on closed file
|
ValueError
|
def __init__(self, xml_doc, namespace=None):
"""
Initializes a XMLPaser object.
:type xml_doc: str, filename, file-like object, parsed XML document
:param xml_doc: XML document
:type namespace: str, optional
:param namespace: Document-wide default namespace. Defaults to ``''``.
"""
if isinstance(xml_doc, basestring):
# some string - check if it starts with <?xml
if xml_doc.strip()[0:5].upper().startswith("<?XML"):
xml_doc = StringIO.StringIO(xml_doc)
# parse XML file
self.xml_doc = etree.parse(xml_doc)
elif hasattr(xml_doc, "seek"):
# some file-based content
xml_doc.seek(0)
self.xml_doc = etree.parse(xml_doc)
# fixes a problem on debian squeeze default python installation.
# xml.etree.parse seems to not rewind the file after parsing, see
# http://tests.obspy.org/?id=3430#0
xml_doc.seek(0)
else:
self.xml_doc = xml_doc
self.xml_root = self.xml_doc.getroot()
self.namespace = namespace or self._getRootNamespace()
|
def __init__(self, xml_doc, namespace=None):
"""
Initializes a XMLPaser object.
:type xml_doc: str, filename, file-like object, parsed XML document
:param xml_doc: XML document
:type namespace: str, optional
:param namespace: Document-wide default namespace. Defaults to ``''``.
"""
if isinstance(xml_doc, basestring):
# some string - check if it starts with <?xml
if xml_doc.strip()[0:5].upper().startswith("<?XML"):
xml_doc = StringIO.StringIO(xml_doc)
# parse XML file
self.xml_doc = etree.parse(xml_doc)
elif hasattr(xml_doc, "seek"):
self.xml_doc = etree.parse(xml_doc)
# fixes a problem on debian squeeze default python installation.
# xml.etree.parse seems to not rewind the file after parsing, see
# http://tests.obspy.org/?id=3430#0
xml_doc.seek(0)
else:
self.xml_doc = xml_doc
self.xml_root = self.xml_doc.getroot()
self.namespace = namespace or self._getRootNamespace()
|
https://github.com/obspy/obspy/issues/489
|
Traceback (most recent call last):
File "/tmp/testrun/git/obspy/core/tests/test_util_xmlwrapper.py", line 49, in test_init
XMLParser(fh)
File "/tmp/testrun/git/obspy/core/util/xmlwrapper.py", line 73, in __init__
xml_doc.seek(0)
ValueError: I/O operation on closed file
|
ValueError
|
def __setattr__(self, key, value):
# 内建属性不放入 key 中
if key.startswith("__") and key.endswith("__"):
super().__setattr__(key, value)
else:
self[key] = value
|
def __setattr__(self, key, value):
self[key] = value
|
https://github.com/Tencent/bk-sops/issues/1984
|
捕获未处理异常,异常具体堆栈->[Traceback (most recent call last):
File "/app/.heroku/python/lib/python3.6/site-packages/django/core/handlers/base.py", line 185, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/app/blueapps/account/decorators.py", line 20, in wrapped_view
return view_func(*args, **kwargs)
File "/app/.heroku/python/lib/python3.6/site-packages/django/views/decorators/http.py", line 40, in inner
return func(request, *args, **kwargs)
File "/app/.heroku/python/lib/python3.6/site-packages/bkoauth/decorators.py", line 17, in _wrapped_view
return view_func(request, *args, **kwargs)
File "/app/gcloud/apigw/decorators.py", line 77, in wrapper
return view_func(request, *args, **kwargs)
File "/app/gcloud/apigw/decorators.py", line 122, in wrapper
return view_func(request, *args, **kwargs)
File "/app/gcloud/apigw/decorators.py", line 215, in wrapper
return view_func(request, *args, **kwargs)
File "/app/gcloud/apigw/views/get_task_detail.py", line 68, in get_task_detail
data = task.get_task_detail()
File "/app/gcloud/taskflow3/models.py", line 1372, in get_task_detail
out['name'] = constants[out['key']]['name']
KeyError: '__dict__'
|
KeyError
|
def execute(self, data, parent_data):
executor = parent_data.get_one_of_inputs("executor")
biz_cc_id = parent_data.get_one_of_inputs("biz_cc_id")
supplier_account = parent_data.get_one_of_inputs("biz_supplier_account")
client = get_client_by_user(executor)
if parent_data.get_one_of_inputs("language"):
translation.activate(parent_data.get_one_of_inputs("language"))
notify_type = data.get_one_of_inputs("bk_notify_type")
receiver_info = data.get_one_of_inputs("bk_receiver_info")
# 兼容原有数据格式
if receiver_info:
receiver_group = receiver_info.get("bk_receiver_group")
more_receiver = receiver_info.get("bk_more_receiver")
else:
receiver_group = data.get_one_of_inputs("bk_receiver_group")
more_receiver = data.get_one_of_inputs("bk_more_receiver")
title = data.get_one_of_inputs("bk_notify_title")
content = data.get_one_of_inputs("bk_notify_content")
code = ""
message = ""
result, msg, receivers = get_notify_receivers(
client, biz_cc_id, supplier_account, receiver_group, more_receiver
)
if not result:
data.set_outputs("ex_data", msg)
return False
for t in notify_type:
kwargs = self._args_gen[t](self, receivers, title, content)
result = getattr(client.cmsi, self._send_func[t])(kwargs)
if not result["result"]:
data.set_outputs("ex_data", result["message"])
return False
code = result["code"]
message = result["message"]
data.set_outputs("code", code)
data.set_outputs("message", message)
return True
|
def execute(self, data, parent_data):
executor = parent_data.get_one_of_inputs("executor")
biz_cc_id = parent_data.get_one_of_inputs("biz_cc_id")
supplier_account = parent_data.get_one_of_inputs("biz_supplier_account")
client = settings.ESB_GET_CLIENT_BY_USER(executor)
if parent_data.get_one_of_inputs("language"):
translation.activate(parent_data.get_one_of_inputs("language"))
notify_type = data.get_one_of_inputs("bk_notify_type")
receiver_info = data.get_one_of_inputs("bk_receiver_info")
# 兼容原有数据格式
if receiver_info:
receiver_group = receiver_info.get("bk_receiver_group")
more_receiver = receiver_info.get("bk_more_receiver")
else:
receiver_group = data.get_one_of_inputs("bk_receiver_group")
more_receiver = data.get_one_of_inputs("bk_more_receiver")
title = data.get_one_of_inputs("bk_notify_title")
content = data.get_one_of_inputs("bk_notify_content")
code = ""
message = ""
result, msg, receivers = get_notify_receivers(
client, biz_cc_id, supplier_account, receiver_group, more_receiver
)
if not result:
data.set_outputs("ex_data", msg)
return False
for t in notify_type:
kwargs = self._args_gen[t](self, receivers, title, content)
result = getattr(client.cmsi, self._send_func[t])(kwargs)
if not result["result"]:
data.set_outputs("ex_data", result["message"])
return False
code = result["code"]
message = result["message"]
data.set_outputs("code", code)
data.set_outputs("message", message)
return True
|
https://github.com/Tencent/bk-sops/issues/324
|
Traceback (most recent call last):
File "/data/app/code/pipeline/engine/core/handlers/service_activity.py", line 77, in handle
success = element.execute(root_pipeline.data)
File "/data/app/code/pipeline/core/flow/activity.py", line 76, in execute
result = self.service.execute(self.data, parent_data)
File "/data/app/code/pipeline_plugins/components/collections/sites/open/bk.py", line 73, in execute
client = settings.ESB_GET_CLIENT_BY_USER(executor)
File "/data/app/code/pipeline/conf/__init__.py", line 27, in __getattr__
raise AttributeError('Settings object has no attribute %s' % key)
AttributeError: Settings object has no attribute ESB_GET_CLIENT_BY_USER
|
AttributeError
|
def get_user_info(request):
client = get_client_by_user(request.user.username)
auth = getattr(client, settings.ESB_AUTH_COMPONENT_SYSTEM)
_get_user_info = getattr(auth, settings.ESB_AUTH_GET_USER_INFO)
user_info = _get_user_info({})
if user_info["result"]:
user_info["data"]["bk_supplier_account"] = 0
return user_info
|
def get_user_info(request):
client = get_client_by_request(request)
auth = getattr(client, settings.ESB_AUTH_COMPONENT_SYSTEM)
_get_user_info = getattr(auth, settings.ESB_AUTH_GET_USER_INFO)
user_info = _get_user_info({})
if "data" in user_info:
user_info["data"]["bk_supplier_account"] = 0
return user_info
|
https://github.com/Tencent/bk-sops/issues/20
|
------STARTING: Migrate Database------
Traceback (most recent call last):
File "manage.py", line 27, in <module>
execute_from_command_line(sys.argv)
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line
utility.execute()
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute
django.setup()
File "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup
apps.populate(settings.INSTALLED_APPS)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate
app_config = AppConfig.create(entry)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create
mod = import_module(mod_path)
File "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/data/app/code/pipeline/apps.py", line 18, in <module>
from rediscluster import StrictRedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>
from .client import StrictRedisCluster, RedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>
from .connection import (
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>
from .nodemanager import NodeManager
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>
from redis._compat import b, unicode, bytes, long, basestring
ImportError: cannot import name b
------FAILURE: Migrate Database------
|
ImportError
|
def process_view(self, request, view_func, view_args, view_kwargs):
"""
If a request path contains biz_cc_id parameter, check if current
user has perm view_business or return http 403.
"""
if getattr(view_func, "login_exempt", False):
return None
biz_cc_id = view_kwargs.get("biz_cc_id") or self._get_biz_cc_id_in_rest_request(
request
)
if biz_cc_id and str(biz_cc_id) != "0":
try:
business = prepare_business(request, cc_id=biz_cc_id)
except exceptions.Unauthorized:
# permission denied for target business (irregular request)
return HttpResponse(status=401)
except exceptions.Forbidden:
# target business does not exist (irregular request)
return HttpResponseForbidden()
except exceptions.APIError as e:
ctx = {
"system": e.system,
"api": e.api,
"message": e.message,
}
logger.error(json.dumps(ctx))
return HttpResponse(status=503, content=json.dumps(ctx))
# set time_zone of business
if business.time_zone:
request.session["blueking_timezone"] = business.time_zone
try:
if not request.user.has_perm("view_business", business):
raise exceptions.Unauthorized(
"user[{username}] has no perm view_business of business[{biz}]".format(
username=request.user.username, biz=business.cc_id
)
)
except Exception as e:
logger.exception(
"user[username={username},type={user_type}] has_perm raise error[{error}]".format(
username=request.user.username,
user_type=type(request.user),
error=e,
)
)
return HttpResponseForbidden(e.message)
|
def process_view(self, request, view_func, view_args, view_kwargs):
"""
If a request path contains biz_cc_id parameter, check if current
user has perm view_business or return http 403.
"""
if getattr(view_func, "login_exempt", False):
return None
biz_cc_id = view_kwargs.get("biz_cc_id") or self._get_biz_cc_id_in_rest_request(
request
)
if biz_cc_id and str(biz_cc_id) != "0":
try:
business = prepare_business(request, cc_id=biz_cc_id)
except exceptions.Unauthorized:
# permission denied for target business (irregular request)
return HttpResponse(status=401)
except exceptions.Forbidden:
# target business does not exist (irregular request)
return HttpResponseForbidden()
except exceptions.APIError as e:
ctx = {
"system": e.system,
"api": e.api,
"message": e.message,
}
logger.error(json.dumps(ctx))
return HttpResponse(status=503, content=json.dumps(ctx))
# set time_zone of business
if business.time_zone:
request.session["blueking_timezone"] = business.time_zone
if not request.user.has_perm("view_business", business):
return HttpResponseForbidden()
|
https://github.com/Tencent/bk-sops/issues/20
|
------STARTING: Migrate Database------
Traceback (most recent call last):
File "manage.py", line 27, in <module>
execute_from_command_line(sys.argv)
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line
utility.execute()
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute
django.setup()
File "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup
apps.populate(settings.INSTALLED_APPS)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate
app_config = AppConfig.create(entry)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create
mod = import_module(mod_path)
File "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/data/app/code/pipeline/apps.py", line 18, in <module>
from rediscluster import StrictRedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>
from .client import StrictRedisCluster, RedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>
from .connection import (
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>
from .nodemanager import NodeManager
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>
from redis._compat import b, unicode, bytes, long, basestring
ImportError: cannot import name b
------FAILURE: Migrate Database------
|
ImportError
|
def _get_user_business_list(request, use_cache=True):
"""Get authorized business list for a exact username.
:param object request: django request object.
:param bool use_cache: (Optional)
"""
user = request.user
cache_key = "%s_get_user_business_list_%s" % (CACHE_PREFIX, user.username)
data = cache.get(cache_key)
if not (use_cache and data):
user_info = _get_user_info(request)
client = get_client_by_user(request.user.username)
result = client.cc.search_business(
{
"bk_supplier_account": user_info["bk_supplier_account"],
"condition": {
"bk_data_status": {"$in": ["enable", "disabled", None]},
"$or": [
{"bk_biz_developer": {"$regex": user.username}},
{"bk_biz_productor": {"$regex": user.username}},
{"bk_biz_maintainer": {"$regex": user.username}},
{"bk_biz_tester": {"$regex": user.username}},
],
},
}
)
if result["result"]:
data = result["data"]["info"]
cache.set(cache_key, data, DEFAULT_CACHE_TIME_FOR_CC)
elif result.get("code") in ("20101", 20101):
raise exceptions.Unauthorized(result["message"])
elif result.get("code") in ("20103", 20103, "20201", 20201, "20202", 20202):
raise exceptions.Forbidden(result["message"])
else:
raise exceptions.APIError(
"cc", "search_business", result.get("detail_message", result["message"])
)
return data
|
def _get_user_business_list(request, use_cache=True):
"""Get authorized business list for a exact username.
:param object request: django request object.
:param bool use_cache: (Optional)
"""
user = request.user
cache_key = "%s_get_user_business_list_%s" % (CACHE_PREFIX, user.username)
data = cache.get(cache_key)
if not (use_cache and data):
user_info = _get_user_info(request)
client = get_client_by_request(request)
result = client.cc.search_business(
{
"bk_supplier_account": user_info["bk_supplier_account"],
"condition": {
"bk_data_status": {"$in": ["enable", "disabled", None]},
"$or": [
{"bk_biz_developer": {"$regex": user.username}},
{"bk_biz_productor": {"$regex": user.username}},
{"bk_biz_maintainer": {"$regex": user.username}},
{"bk_biz_tester": {"$regex": user.username}},
],
},
}
)
if result["result"]:
data = result["data"]["info"]
cache.set(cache_key, data, DEFAULT_CACHE_TIME_FOR_CC)
elif result.get("code") in ("20101", 20101):
raise exceptions.Unauthorized(result["message"])
elif result.get("code") in ("20103", 20103, "20201", 20201, "20202", 20202):
raise exceptions.Forbidden(result["message"])
else:
raise exceptions.APIError(
"cc", "search_business", result.get("detail_message", result["message"])
)
return data
|
https://github.com/Tencent/bk-sops/issues/20
|
------STARTING: Migrate Database------
Traceback (most recent call last):
File "manage.py", line 27, in <module>
execute_from_command_line(sys.argv)
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line
utility.execute()
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute
django.setup()
File "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup
apps.populate(settings.INSTALLED_APPS)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate
app_config = AppConfig.create(entry)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create
mod = import_module(mod_path)
File "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/data/app/code/pipeline/apps.py", line 18, in <module>
from rediscluster import StrictRedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>
from .client import StrictRedisCluster, RedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>
from .connection import (
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>
from .nodemanager import NodeManager
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>
from redis._compat import b, unicode, bytes, long, basestring
ImportError: cannot import name b
------FAILURE: Migrate Database------
|
ImportError
|
def _get_business_info(request, app_id, use_cache=True, use_maintainer=False):
"""Get detail infomations for a exact app_id.
@param object request: django request object.
@param int app_id: cc_id of core.business model.
@param use_maintainer: 使用运维身份请求
"""
username = request.user.username
business = Business.objects.get(cc_id=app_id)
cache_key = "%s_get_business_info_%s_%s" % (CACHE_PREFIX, app_id, username)
data = cache.get(cache_key)
if not (use_cache and data):
if use_maintainer:
client = get_client_by_user_and_biz_id(username, app_id)
else:
client = get_client_by_user(request.user.username)
result = client.cc.search_business(
{
"bk_supplier_account": business.cc_owner,
"condition": {"bk_biz_id": int(app_id)},
}
)
if result["result"]:
if not result["data"]["info"]:
raise exceptions.Forbidden()
data = result["data"]["info"][0]
elif result.get("code") in ("20101", 20101):
raise exceptions.Unauthorized(result["message"])
elif result.get("code") in ("20103", 20103, "20201", 20201, "20202", 20202):
raise exceptions.Forbidden(result["message"])
else:
raise exceptions.APIError(
"cc", "get_app_by_id", result.get("detail_message", result["message"])
)
cache.set(cache_key, data, DEFAULT_CACHE_TIME_FOR_CC)
return data
|
def _get_business_info(request, app_id, use_cache=True, use_maintainer=False):
"""Get detail infomations for a exact app_id.
@param object request: django request object.
@param int app_id: cc_id of core.business model.
@param use_maintainer: 使用运维身份请求
"""
username = request.user.username
business = Business.objects.get(cc_id=app_id)
cache_key = "%s_get_business_info_%s_%s" % (CACHE_PREFIX, app_id, username)
data = cache.get(cache_key)
if not (use_cache and data):
if use_maintainer:
client = get_client_by_user_and_biz_id(username, app_id)
else:
client = get_client_by_request(request)
result = client.cc.search_business(
{
"bk_supplier_account": business.cc_owner,
"condition": {"bk_biz_id": int(app_id)},
}
)
if result["result"]:
if not result["data"]["info"]:
raise exceptions.Forbidden()
data = result["data"]["info"][0]
elif result.get("code") in ("20101", 20101):
raise exceptions.Unauthorized(result["message"])
elif result.get("code") in ("20103", 20103, "20201", 20201, "20202", 20202):
raise exceptions.Forbidden(result["message"])
else:
raise exceptions.APIError(
"cc", "get_app_by_id", result.get("detail_message", result["message"])
)
cache.set(cache_key, data, DEFAULT_CACHE_TIME_FOR_CC)
return data
|
https://github.com/Tencent/bk-sops/issues/20
|
------STARTING: Migrate Database------
Traceback (most recent call last):
File "manage.py", line 27, in <module>
execute_from_command_line(sys.argv)
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line
utility.execute()
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute
django.setup()
File "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup
apps.populate(settings.INSTALLED_APPS)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate
app_config = AppConfig.create(entry)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create
mod = import_module(mod_path)
File "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/data/app/code/pipeline/apps.py", line 18, in <module>
from rediscluster import StrictRedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>
from .client import StrictRedisCluster, RedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>
from .connection import (
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>
from .nodemanager import NodeManager
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>
from redis._compat import b, unicode, bytes, long, basestring
ImportError: cannot import name b
------FAILURE: Migrate Database------
|
ImportError
|
def get_job_instance_log(request, biz_cc_id):
client = get_client_by_user(request.user.username)
job_instance_id = request.GET.get("job_instance_id")
log_kwargs = {"bk_biz_id": biz_cc_id, "job_instance_id": job_instance_id}
log_result = client.job.get_job_instance_log(log_kwargs)
return JsonResponse(log_result)
|
def get_job_instance_log(request, biz_cc_id):
client = get_client_by_request(request)
job_instance_id = request.GET.get("job_instance_id")
log_kwargs = {"bk_biz_id": biz_cc_id, "job_instance_id": job_instance_id}
log_result = client.job.get_job_instance_log(log_kwargs)
return JsonResponse(log_result)
|
https://github.com/Tencent/bk-sops/issues/20
|
------STARTING: Migrate Database------
Traceback (most recent call last):
File "manage.py", line 27, in <module>
execute_from_command_line(sys.argv)
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line
utility.execute()
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute
django.setup()
File "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup
apps.populate(settings.INSTALLED_APPS)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate
app_config = AppConfig.create(entry)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create
mod = import_module(mod_path)
File "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/data/app/code/pipeline/apps.py", line 18, in <module>
from rediscluster import StrictRedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>
from .client import StrictRedisCluster, RedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>
from .connection import (
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>
from .nodemanager import NodeManager
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>
from redis._compat import b, unicode, bytes, long, basestring
ImportError: cannot import name b
------FAILURE: Migrate Database------
|
ImportError
|
def cmdb_search_host(request, bk_biz_id, bk_supplier_account="", bk_supplier_id=0):
"""
@summary: 获取 CMDB 上业务的 IP 列表,以及 agent 状态等信息
@param request:
@param bk_biz_id: 业务 CMDB ID
@param bk_supplier_account: 业务开发商账号
@param bk_supplier_id: 业务开发商ID
@params fields: list 查询字段,默认只返回 bk_host_innerip、bk_host_name、bk_host_id, 可以查询主机的任意字段,也可以查询
set、module、cloud、agent等信息
@return:
"""
fields = json.loads(request.GET.get("fields", "[]"))
client = get_client_by_user(request.user.username)
condition = [
{
"bk_obj_id": "host",
"fields": [],
}
]
if "set" in fields:
condition.append(
{
"bk_obj_id": "set",
"fields": [],
}
)
if "module" in fields:
condition.append(
{
"bk_obj_id": "module",
"fields": [],
}
)
kwargs = {
"bk_biz_id": bk_biz_id,
"bk_supplier_account": bk_supplier_account,
"condition": condition,
}
host_result = client.cc.search_host(kwargs)
if not host_result["result"]:
message = handle_api_error(
_("配置平台(CMDB)"), "cc.search_host", kwargs, host_result["message"]
)
result = {
"result": False,
"code": ERROR_CODES.API_CMDB_ERROR,
"message": message,
}
return JsonResponse(result)
host_info = host_result["data"]["info"]
data = []
default_fields = ["bk_host_innerip", "bk_host_name", "bk_host_id"]
fields = list(set(default_fields + fields))
for host in host_info:
host_detail = {
field: host["host"][field] for field in fields if field in host["host"]
}
if "set" in fields:
host_detail["set"] = host["set"]
if "module" in fields:
host_detail["module"] = host["module"]
if "cloud" in fields or "agent" in fields:
host_detail["cloud"] = host["host"]["bk_cloud_id"]
data.append(host_detail)
if "agent" in fields:
agent_kwargs = {
"bk_biz_id": bk_biz_id,
"bk_supplier_id": bk_supplier_id,
"hosts": [
{"bk_cloud_id": host["cloud"][0]["id"], "ip": host["bk_host_innerip"]}
for host in data
],
}
agent_result = client.gse.get_agent_status(agent_kwargs)
if not agent_result["result"]:
message = handle_api_error(
_("管控平台(GSE)"),
"gse.get_agent_status",
agent_kwargs,
agent_result["message"],
)
result = {
"result": False,
"code": ERROR_CODES.API_GSE_ERROR,
"message": message,
}
return JsonResponse(result)
agent_data = agent_result["data"]
for host in data:
# agent在线状态,0为不在线,1为在线,-1为未知
agent_info = agent_data.get(
"%s:%s" % (host["cloud"][0]["id"], host["bk_host_innerip"]), {}
)
host["agent"] = agent_info.get("bk_agent_alive", -1)
result = {"result": True, "code": NO_ERROR, "data": data}
return JsonResponse(result)
|
def cmdb_search_host(request, bk_biz_id, bk_supplier_account="", bk_supplier_id=0):
"""
@summary: 获取 CMDB 上业务的 IP 列表,以及 agent 状态等信息
@param request:
@param bk_biz_id: 业务 CMDB ID
@param bk_supplier_account: 业务开发商账号
@param bk_supplier_id: 业务开发商ID
@params fields: list 查询字段,默认只返回 bk_host_innerip、bk_host_name、bk_host_id, 可以查询主机的任意字段,也可以查询
set、module、cloud、agent等信息
@return:
"""
fields = json.loads(request.GET.get("fields", "[]"))
client = get_client_by_request(request)
condition = [
{
"bk_obj_id": "host",
"fields": [],
}
]
if "set" in fields:
condition.append(
{
"bk_obj_id": "set",
"fields": [],
}
)
if "module" in fields:
condition.append(
{
"bk_obj_id": "module",
"fields": [],
}
)
kwargs = {
"bk_biz_id": bk_biz_id,
"bk_supplier_account": bk_supplier_account,
"condition": condition,
}
host_result = client.cc.search_host(kwargs)
if not host_result["result"]:
message = handle_api_error(
_("配置平台(CMDB)"), "cc.search_host", kwargs, host_result["message"]
)
result = {
"result": False,
"code": ERROR_CODES.API_CMDB_ERROR,
"message": message,
}
return JsonResponse(result)
host_info = host_result["data"]["info"]
data = []
default_fields = ["bk_host_innerip", "bk_host_name", "bk_host_id"]
fields = list(set(default_fields + fields))
for host in host_info:
host_detail = {
field: host["host"][field] for field in fields if field in host["host"]
}
if "set" in fields:
host_detail["set"] = host["set"]
if "module" in fields:
host_detail["module"] = host["module"]
if "cloud" in fields or "agent" in fields:
host_detail["cloud"] = host["host"]["bk_cloud_id"]
data.append(host_detail)
if "agent" in fields:
agent_kwargs = {
"bk_biz_id": bk_biz_id,
"bk_supplier_id": bk_supplier_id,
"hosts": [
{"bk_cloud_id": host["cloud"][0]["id"], "ip": host["bk_host_innerip"]}
for host in data
],
}
agent_result = client.gse.get_agent_status(agent_kwargs)
if not agent_result["result"]:
message = handle_api_error(
_("管控平台(GSE)"),
"gse.get_agent_status",
agent_kwargs,
agent_result["message"],
)
result = {
"result": False,
"code": ERROR_CODES.API_GSE_ERROR,
"message": message,
}
return JsonResponse(result)
agent_data = agent_result["data"]
for host in data:
# agent在线状态,0为不在线,1为在线,-1为未知
agent_info = agent_data.get(
"%s:%s" % (host["cloud"][0]["id"], host["bk_host_innerip"]), {}
)
host["agent"] = agent_info.get("bk_agent_alive", -1)
result = {"result": True, "code": NO_ERROR, "data": data}
return JsonResponse(result)
|
https://github.com/Tencent/bk-sops/issues/20
|
------STARTING: Migrate Database------
Traceback (most recent call last):
File "manage.py", line 27, in <module>
execute_from_command_line(sys.argv)
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line
utility.execute()
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute
django.setup()
File "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup
apps.populate(settings.INSTALLED_APPS)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate
app_config = AppConfig.create(entry)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create
mod = import_module(mod_path)
File "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/data/app/code/pipeline/apps.py", line 18, in <module>
from rediscluster import StrictRedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>
from .client import StrictRedisCluster, RedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>
from .connection import (
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>
from .nodemanager import NodeManager
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>
from redis._compat import b, unicode, bytes, long, basestring
ImportError: cannot import name b
------FAILURE: Migrate Database------
|
ImportError
|
def cmdb_get_mainline_object_topo(request, bk_biz_id, bk_supplier_account=""):
"""
@summary: 获取配置平台业务拓扑模型
@param request:
@param bk_biz_id:
@param bk_supplier_account:
@return:
"""
kwargs = {
"bk_biz_id": bk_biz_id,
"bk_supplier_account": bk_supplier_account,
}
client = get_client_by_user(request.user.username)
cc_result = client.cc.get_mainline_object_topo(kwargs)
if not cc_result["result"]:
message = handle_api_error(
_("配置平台(CMDB)"),
"cc.get_mainline_object_topo",
kwargs,
cc_result["message"],
)
return {
"result": cc_result["result"],
"code": cc_result["code"],
"message": message,
}
data = cc_result["data"]
for bk_obj in data:
if bk_obj["bk_obj_id"] == "host":
bk_obj["bk_obj_name"] = "IP"
result = {
"result": cc_result["result"],
"code": cc_result["code"],
"data": cc_result["data"],
}
return JsonResponse(result)
|
def cmdb_get_mainline_object_topo(request, bk_biz_id, bk_supplier_account=""):
"""
@summary: 获取配置平台业务拓扑模型
@param request:
@param bk_biz_id:
@param bk_supplier_account:
@return:
"""
kwargs = {
"bk_biz_id": bk_biz_id,
"bk_supplier_account": bk_supplier_account,
}
client = get_client_by_request(request)
cc_result = client.cc.get_mainline_object_topo(kwargs)
if not cc_result["result"]:
message = handle_api_error(
_("配置平台(CMDB)"),
"cc.get_mainline_object_topo",
kwargs,
cc_result["message"],
)
return {
"result": cc_result["result"],
"code": cc_result["code"],
"message": message,
}
data = cc_result["data"]
for bk_obj in data:
if bk_obj["bk_obj_id"] == "host":
bk_obj["bk_obj_name"] = "IP"
result = {
"result": cc_result["result"],
"code": cc_result["code"],
"data": cc_result["data"],
}
return JsonResponse(result)
|
https://github.com/Tencent/bk-sops/issues/20
|
------STARTING: Migrate Database------
Traceback (most recent call last):
File "manage.py", line 27, in <module>
execute_from_command_line(sys.argv)
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line
utility.execute()
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute
django.setup()
File "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup
apps.populate(settings.INSTALLED_APPS)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate
app_config = AppConfig.create(entry)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create
mod = import_module(mod_path)
File "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/data/app/code/pipeline/apps.py", line 18, in <module>
from rediscluster import StrictRedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>
from .client import StrictRedisCluster, RedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>
from .connection import (
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>
from .nodemanager import NodeManager
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>
from redis._compat import b, unicode, bytes, long, basestring
ImportError: cannot import name b
------FAILURE: Migrate Database------
|
ImportError
|
def cc_search_object_attribute(request, obj_id, biz_cc_id, supplier_account):
"""
@summary: 获取对象自定义属性
@param request:
@param biz_cc_id:
@return:
"""
client = get_client_by_user(request.user.username)
kwargs = {"bk_obj_id": obj_id, "bk_supplier_account": supplier_account}
cc_result = client.cc.search_object_attribute(kwargs)
if not cc_result["result"]:
message = handle_api_error(
"cc", "cc.search_object_attribute", kwargs, cc_result["message"]
)
logger.error(message)
result = {"result": False, "data": [], "message": message}
return JsonResponse(result)
obj_property = []
for item in cc_result["data"]:
if item["editable"]:
obj_property.append(
{"value": item["bk_property_id"], "text": item["bk_property_name"]}
)
return JsonResponse({"result": True, "data": obj_property})
|
def cc_search_object_attribute(request, obj_id, biz_cc_id, supplier_account):
"""
@summary: 获取对象自定义属性
@param request:
@param biz_cc_id:
@return:
"""
client = get_client_by_request(request)
kwargs = {"bk_obj_id": obj_id, "bk_supplier_account": supplier_account}
cc_result = client.cc.search_object_attribute(kwargs)
if not cc_result["result"]:
message = handle_api_error(
"cc", "cc.search_object_attribute", kwargs, cc_result["message"]
)
logger.error(message)
result = {"result": False, "data": [], "message": message}
return JsonResponse(result)
obj_property = []
for item in cc_result["data"]:
if item["editable"]:
obj_property.append(
{"value": item["bk_property_id"], "text": item["bk_property_name"]}
)
return JsonResponse({"result": True, "data": obj_property})
|
https://github.com/Tencent/bk-sops/issues/20
|
------STARTING: Migrate Database------
Traceback (most recent call last):
File "manage.py", line 27, in <module>
execute_from_command_line(sys.argv)
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line
utility.execute()
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute
django.setup()
File "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup
apps.populate(settings.INSTALLED_APPS)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate
app_config = AppConfig.create(entry)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create
mod = import_module(mod_path)
File "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/data/app/code/pipeline/apps.py", line 18, in <module>
from rediscluster import StrictRedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>
from .client import StrictRedisCluster, RedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>
from .connection import (
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>
from .nodemanager import NodeManager
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>
from redis._compat import b, unicode, bytes, long, basestring
ImportError: cannot import name b
------FAILURE: Migrate Database------
|
ImportError
|
def cc_search_create_object_attribute(request, obj_id, biz_cc_id, supplier_account):
client = get_client_by_user(request.user.username)
kwargs = {"bk_obj_id": obj_id, "bk_supplier_account": supplier_account}
cc_result = client.cc.search_object_attribute(kwargs)
if not cc_result["result"]:
message = handle_api_error(
"cc", "cc.search_object_attribute", kwargs, cc_result["message"]
)
logger.error(message)
result = {"result": False, "data": [], "message": message}
return JsonResponse(result)
obj_property = []
for item in cc_result["data"]:
if item["editable"]:
prop_dict = {
"tag_code": item["bk_property_id"],
"type": "input",
"attrs": {
"name": item["bk_property_name"],
"editable": "true",
},
}
if item["bk_property_id"] in ["bk_set_name"]:
prop_dict["attrs"]["validation"] = [{"type": "required"}]
obj_property.append(prop_dict)
return JsonResponse({"result": True, "data": obj_property})
|
def cc_search_create_object_attribute(request, obj_id, biz_cc_id, supplier_account):
client = get_client_by_request(request)
kwargs = {"bk_obj_id": obj_id, "bk_supplier_account": supplier_account}
cc_result = client.cc.search_object_attribute(kwargs)
if not cc_result["result"]:
message = handle_api_error(
"cc", "cc.search_object_attribute", kwargs, cc_result["message"]
)
logger.error(message)
result = {"result": False, "data": [], "message": message}
return JsonResponse(result)
obj_property = []
for item in cc_result["data"]:
if item["editable"]:
prop_dict = {
"tag_code": item["bk_property_id"],
"type": "input",
"attrs": {
"name": item["bk_property_name"],
"editable": "true",
},
}
if item["bk_property_id"] in ["bk_set_name"]:
prop_dict["attrs"]["validation"] = [{"type": "required"}]
obj_property.append(prop_dict)
return JsonResponse({"result": True, "data": obj_property})
|
https://github.com/Tencent/bk-sops/issues/20
|
------STARTING: Migrate Database------
Traceback (most recent call last):
File "manage.py", line 27, in <module>
execute_from_command_line(sys.argv)
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line
utility.execute()
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute
django.setup()
File "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup
apps.populate(settings.INSTALLED_APPS)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate
app_config = AppConfig.create(entry)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create
mod = import_module(mod_path)
File "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/data/app/code/pipeline/apps.py", line 18, in <module>
from rediscluster import StrictRedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>
from .client import StrictRedisCluster, RedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>
from .connection import (
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>
from .nodemanager import NodeManager
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>
from redis._compat import b, unicode, bytes, long, basestring
ImportError: cannot import name b
------FAILURE: Migrate Database------
|
ImportError
|
def cc_search_topo(request, obj_id, category, biz_cc_id, supplier_account):
"""
@summary: 查询对象拓扑
@param request:
@param biz_cc_id:
@return:
"""
client = get_client_by_user(request.user.username)
kwargs = {"bk_biz_id": biz_cc_id, "bk_supplier_account": supplier_account}
cc_result = client.cc.search_biz_inst_topo(kwargs)
if not cc_result["result"]:
message = handle_api_error(
"cc", "cc.search_biz_inst_topo", kwargs, cc_result["message"]
)
logger.error(message)
result = {"result": False, "data": [], "message": message}
return JsonResponse(result)
if category in ["normal", "prev", "picker"]:
cc_topo = cc_format_topo_data(cc_result["data"], obj_id, category)
else:
cc_topo = []
return JsonResponse({"result": True, "data": cc_topo})
|
def cc_search_topo(request, obj_id, category, biz_cc_id, supplier_account):
"""
@summary: 查询对象拓扑
@param request:
@param biz_cc_id:
@return:
"""
client = get_client_by_request(request)
kwargs = {"bk_biz_id": biz_cc_id, "bk_supplier_account": supplier_account}
cc_result = client.cc.search_biz_inst_topo(kwargs)
if not cc_result["result"]:
message = handle_api_error(
"cc", "cc.search_biz_inst_topo", kwargs, cc_result["message"]
)
logger.error(message)
result = {"result": False, "data": [], "message": message}
return JsonResponse(result)
if category in ["normal", "prev", "picker"]:
cc_topo = cc_format_topo_data(cc_result["data"], obj_id, category)
else:
cc_topo = []
return JsonResponse({"result": True, "data": cc_topo})
|
https://github.com/Tencent/bk-sops/issues/20
|
------STARTING: Migrate Database------
Traceback (most recent call last):
File "manage.py", line 27, in <module>
execute_from_command_line(sys.argv)
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line
utility.execute()
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute
django.setup()
File "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup
apps.populate(settings.INSTALLED_APPS)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate
app_config = AppConfig.create(entry)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create
mod = import_module(mod_path)
File "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/data/app/code/pipeline/apps.py", line 18, in <module>
from rediscluster import StrictRedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>
from .client import StrictRedisCluster, RedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>
from .connection import (
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>
from .nodemanager import NodeManager
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>
from redis._compat import b, unicode, bytes, long, basestring
ImportError: cannot import name b
------FAILURE: Migrate Database------
|
ImportError
|
def job_get_script_list(request, biz_cc_id):
"""
查询业务脚本列表
:param request:
:param biz_cc_id:
:return:
"""
# 查询脚本列表
client = get_client_by_user(request.user.username)
script_type = request.GET.get("type")
kwargs = {
"bk_biz_id": biz_cc_id,
"is_public": True if script_type == "public" else False,
}
script_result = client.job.get_script_list(kwargs)
if not script_result["result"]:
message = handle_api_error(
"cc", "job.get_script_list", kwargs, script_result["message"]
)
logger.error(message)
result = {"result": False, "message": message}
return JsonResponse(result)
script_dict = {}
for script in script_result["data"]["data"]:
script_dict.setdefault(script["name"], []).append(script["id"])
version_data = []
for name, version in script_dict.items():
version_data.append({"text": name, "value": max(version)})
return JsonResponse({"result": True, "data": version_data})
|
def job_get_script_list(request, biz_cc_id):
"""
查询业务脚本列表
:param request:
:param biz_cc_id:
:return:
"""
# 查询脚本列表
client = get_client_by_request(request)
script_type = request.GET.get("type")
kwargs = {
"bk_biz_id": biz_cc_id,
"is_public": True if script_type == "public" else False,
}
script_result = client.job.get_script_list(kwargs)
if not script_result["result"]:
message = handle_api_error(
"cc", "job.get_script_list", kwargs, script_result["message"]
)
logger.error(message)
result = {"result": False, "message": message}
return JsonResponse(result)
script_dict = {}
for script in script_result["data"]["data"]:
script_dict.setdefault(script["name"], []).append(script["id"])
version_data = []
for name, version in script_dict.items():
version_data.append({"text": name, "value": max(version)})
return JsonResponse({"result": True, "data": version_data})
|
https://github.com/Tencent/bk-sops/issues/20
|
------STARTING: Migrate Database------
Traceback (most recent call last):
File "manage.py", line 27, in <module>
execute_from_command_line(sys.argv)
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line
utility.execute()
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute
django.setup()
File "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup
apps.populate(settings.INSTALLED_APPS)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate
app_config = AppConfig.create(entry)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create
mod = import_module(mod_path)
File "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/data/app/code/pipeline/apps.py", line 18, in <module>
from rediscluster import StrictRedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>
from .client import StrictRedisCluster, RedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>
from .connection import (
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>
from .nodemanager import NodeManager
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>
from redis._compat import b, unicode, bytes, long, basestring
ImportError: cannot import name b
------FAILURE: Migrate Database------
|
ImportError
|
def job_get_job_tasks_by_biz(request, biz_cc_id):
client = get_client_by_user(request.user.username)
job_result = client.job.get_job_list({"bk_biz_id": biz_cc_id})
if not job_result["result"]:
message = _(
"查询作业平台(JOB)的作业模板[app_id=%s]接口job.get_task返回失败: %s"
) % (biz_cc_id, job_result["message"])
logger.error(message)
result = {"result": False, "data": [], "message": message}
return JsonResponse(result)
task_list = []
for task in job_result["data"]:
task_list.append(
{
"value": task["bk_job_id"],
"text": task["name"],
}
)
return JsonResponse({"result": True, "data": task_list})
|
def job_get_job_tasks_by_biz(request, biz_cc_id):
client = get_client_by_request(request)
job_result = client.job.get_job_list({"bk_biz_id": biz_cc_id})
if not job_result["result"]:
message = _(
"查询作业平台(JOB)的作业模板[app_id=%s]接口job.get_task返回失败: %s"
) % (biz_cc_id, job_result["message"])
logger.error(message)
result = {"result": False, "data": [], "message": message}
return JsonResponse(result)
task_list = []
for task in job_result["data"]:
task_list.append(
{
"value": task["bk_job_id"],
"text": task["name"],
}
)
return JsonResponse({"result": True, "data": task_list})
|
https://github.com/Tencent/bk-sops/issues/20
|
------STARTING: Migrate Database------
Traceback (most recent call last):
File "manage.py", line 27, in <module>
execute_from_command_line(sys.argv)
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line
utility.execute()
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute
django.setup()
File "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup
apps.populate(settings.INSTALLED_APPS)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate
app_config = AppConfig.create(entry)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create
mod = import_module(mod_path)
File "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/data/app/code/pipeline/apps.py", line 18, in <module>
from rediscluster import StrictRedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>
from .client import StrictRedisCluster, RedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>
from .connection import (
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>
from .nodemanager import NodeManager
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>
from redis._compat import b, unicode, bytes, long, basestring
ImportError: cannot import name b
------FAILURE: Migrate Database------
|
ImportError
|
def job_get_job_task_detail(request, biz_cc_id, task_id):
client = get_client_by_user(request.user.username)
job_result = client.job.get_job_detail(
{"bk_biz_id": biz_cc_id, "bk_job_id": task_id}
)
if not job_result["result"]:
message = _(
"查询作业平台(JOB)的作业模板详情[app_id=%s]接口job.get_task_detail返回失败: %s"
) % (biz_cc_id, job_result["message"])
logger.error(message)
result = {"result": False, "data": [], "message": message}
return JsonResponse(result)
job_step_type_name = {1: _("脚本"), 2: _("文件"), 4: "SQL"}
task_detail = job_result["data"]
global_var = []
steps = []
for var in task_detail.get("global_vars", []):
# 1-字符串, 2-IP, 3-索引数组, 4-关联数组
if var["type"] in [JOB_VAR_TYPE_STR, JOB_VAR_TYPE_IP, JOB_VAR_TYPE_ARRAY]:
value = var.get("value", "")
else:
value = [
"{plat_id}:{ip}".format(
plat_id=ip_item["bk_cloud_id"], ip=ip_item["ip"]
)
for ip_item in var.get("ip_list", [])
]
global_var.append(
{
"id": var["id"],
# 全局变量类型:1:云参, 2:上下文参数,3:IP
"category": var.get("category", 1),
"name": var["name"],
"type": var["type"],
"value": value,
"description": var["description"],
}
)
for info in task_detail.get("steps", []):
# 1-执行脚本, 2-传文件, 4-传SQL
steps.append(
{
"stepId": info["step_id"],
"name": info["name"],
"scriptParams": info.get("script_param", ""),
"account": info.get("account", ""),
"ipList": "",
"type": info["type"],
"type_name": job_step_type_name.get(info["type"], info["type"]),
}
)
return JsonResponse(
{"result": True, "data": {"global_var": global_var, "steps": steps}}
)
|
def job_get_job_task_detail(request, biz_cc_id, task_id):
client = get_client_by_request(request)
job_result = client.job.get_job_detail(
{"bk_biz_id": biz_cc_id, "bk_job_id": task_id}
)
if not job_result["result"]:
message = _(
"查询作业平台(JOB)的作业模板详情[app_id=%s]接口job.get_task_detail返回失败: %s"
) % (biz_cc_id, job_result["message"])
logger.error(message)
result = {"result": False, "data": [], "message": message}
return JsonResponse(result)
job_step_type_name = {1: _("脚本"), 2: _("文件"), 4: "SQL"}
task_detail = job_result["data"]
global_var = []
steps = []
for var in task_detail.get("global_vars", []):
# 1-字符串, 2-IP, 3-索引数组, 4-关联数组
if var["type"] in [JOB_VAR_TYPE_STR, JOB_VAR_TYPE_IP, JOB_VAR_TYPE_ARRAY]:
value = var.get("value", "")
else:
value = [
"{plat_id}:{ip}".format(
plat_id=ip_item["bk_cloud_id"], ip=ip_item["ip"]
)
for ip_item in var.get("ip_list", [])
]
global_var.append(
{
"id": var["id"],
# 全局变量类型:1:云参, 2:上下文参数,3:IP
"category": var.get("category", 1),
"name": var["name"],
"type": var["type"],
"value": value,
"description": var["description"],
}
)
for info in task_detail.get("steps", []):
# 1-执行脚本, 2-传文件, 4-传SQL
steps.append(
{
"stepId": info["step_id"],
"name": info["name"],
"scriptParams": info.get("script_param", ""),
"account": info.get("account", ""),
"ipList": "",
"type": info["type"],
"type_name": job_step_type_name.get(info["type"], info["type"]),
}
)
return JsonResponse(
{"result": True, "data": {"global_var": global_var, "steps": steps}}
)
|
https://github.com/Tencent/bk-sops/issues/20
|
------STARTING: Migrate Database------
Traceback (most recent call last):
File "manage.py", line 27, in <module>
execute_from_command_line(sys.argv)
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line
utility.execute()
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute
django.setup()
File "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup
apps.populate(settings.INSTALLED_APPS)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate
app_config = AppConfig.create(entry)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create
mod = import_module(mod_path)
File "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/data/app/code/pipeline/apps.py", line 18, in <module>
from rediscluster import StrictRedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>
from .client import StrictRedisCluster, RedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>
from .connection import (
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>
from .nodemanager import NodeManager
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>
from redis._compat import b, unicode, bytes, long, basestring
ImportError: cannot import name b
------FAILURE: Migrate Database------
|
ImportError
|
def get_bk_user(request):
bkuser = None
if request.weixin_user and not isinstance(request.weixin_user, AnonymousUser):
user_model = get_user_model()
try:
user_property = UserProperty.objects.get(
key="wx_userid", value=request.weixin_user.userid
)
except UserProperty.DoesNotExist:
logger.warning(
"user[wx_userid=%s] not in UserProperty" % request.weixin_user.userid
)
else:
bkuser = user_model.objects.get(username=user_property.user.username)
return bkuser or AnonymousUser()
|
def get_bk_user(request):
bkuser = None
if request.weixin_user and not isinstance(request.weixin_user, AnonymousUser):
try:
user_property = UserProperty.objects.get(
key="wx_userid", value=request.weixin_user.userid
)
bkuser = user_property.user
except UserProperty.DoesNotExist:
bkuser = None
return bkuser or AnonymousUser()
|
https://github.com/Tencent/bk-sops/issues/20
|
------STARTING: Migrate Database------
Traceback (most recent call last):
File "manage.py", line 27, in <module>
execute_from_command_line(sys.argv)
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line
utility.execute()
File "/cache/.bk/env/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute
django.setup()
File "/cache/.bk/env/lib/python2.7/site-packages/django/__init__.py", line 18, in setup
apps.populate(settings.INSTALLED_APPS)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate
app_config = AppConfig.create(entry)
File "/cache/.bk/env/lib/python2.7/site-packages/django/apps/config.py", line 112, in create
mod = import_module(mod_path)
File "/cache/.bk/env/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/data/app/code/pipeline/apps.py", line 18, in <module>
from rediscluster import StrictRedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/__init__.py", line 7, in <module>
from .client import StrictRedisCluster, RedisCluster
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/client.py", line 10, in <module>
from .connection import (
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/connection.py", line 11, in <module>
from .nodemanager import NodeManager
File "/cache/.bk/env/lib/python2.7/site-packages/rediscluster/nodemanager.py", line 12, in <module>
from redis._compat import b, unicode, bytes, long, basestring
ImportError: cannot import name b
------FAILURE: Migrate Database------
|
ImportError
|
def fit(self, dataset: Dataset):
"""Calculates statistics for this workflow on the input dataset
Parameters
-----------
dataset: Dataset
The input dataset to calculate statistics for. If there is a train/test split this
data should be the training dataset only.
"""
self._clear_worker_cache()
ddf = dataset.to_ddf(columns=self._input_columns())
# Get a dictionary mapping all StatOperators we need to fit to a set of any dependant
# StatOperators (having StatOperators that depend on the output of other StatOperators
# means that will have multiple phases in the fit cycle here)
stat_ops = {
op: _get_stat_ops(op.parents) for op in _get_stat_ops([self.column_group])
}
while stat_ops:
# get all the StatOperators that we can currently call fit on (no outstanding
# dependencies)
current_phase = [
op for op, dependencies in stat_ops.items() if not dependencies
]
if not current_phase:
# this shouldn't happen, but lets not infinite loop just in case
raise RuntimeError("failed to find dependency-free StatOperator to fit")
stats, ops = [], []
for column_group in current_phase:
# apply transforms necessary for the inputs to the current column group, ignoring
# the transforms from the statop itself
transformed_ddf = _transform_ddf(ddf, column_group.parents)
op = column_group.op
try:
stats.append(op.fit(column_group.input_column_names, transformed_ddf))
ops.append(op)
except Exception:
LOG.exception("Failed to fit operator %s", column_group.op)
raise
if self.client:
results = [r.result() for r in self.client.compute(stats)]
else:
results = dask.compute(stats, scheduler="synchronous")[0]
for computed_stats, op in zip(results, ops):
op.fit_finalize(computed_stats)
# Remove all the operators we processed in this phase, and remove
# from the dependencies of other ops too
for stat_op in current_phase:
stat_ops.pop(stat_op)
for dependencies in stat_ops.values():
dependencies.difference_update(current_phase)
# hack: store input/output dtypes here. We should have complete dtype
# information for each operator (like we do for column names), but as
# an interim solution this gets us what we need.
input_dtypes = dataset.to_ddf()[self._input_columns()].dtypes
self.input_dtypes = dict(zip(input_dtypes.index, input_dtypes))
output_dtypes = self.transform(dataset).to_ddf().head(1).dtypes
self.output_dtypes = dict(zip(output_dtypes.index, output_dtypes))
|
def fit(self, dataset: Dataset):
"""Calculates statistics for this workflow on the input dataset
Parameters
-----------
dataset: Dataset
The input dataset to calculate statistics for. If there is a train/test split this
data should be the training dataset only.
"""
self._clear_worker_cache()
ddf = dataset.to_ddf(columns=self._input_columns())
# Get a dictionary mapping all StatOperators we need to fit to a set of any dependant
# StatOperators (having StatOperators that depend on the output of other StatOperators
# means that will have multiple phases in the fit cycle here)
stat_ops = {
op: _get_stat_ops(op.parents) for op in _get_stat_ops([self.column_group])
}
while stat_ops:
# get all the StatOperators that we can currently call fit on (no outstanding
# dependencies)
current_phase = [
op for op, dependencies in stat_ops.items() if not dependencies
]
if not current_phase:
# this shouldn't happen, but lets not infinite loop just in case
raise RuntimeError("failed to find dependency-free StatOperator to fit")
stats, ops = [], []
for column_group in current_phase:
# apply transforms necessary for the inputs to the current column group, ignoring
# the transforms from the statop itself
transformed_ddf = _transform_ddf(ddf, column_group.parents)
op = column_group.op
try:
stats.append(op.fit(column_group.input_column_names, transformed_ddf))
ops.append(op)
except Exception:
LOG.exception("Failed to fit operator %s", column_group.op)
raise
if self.client:
results = [r.result() for r in self.client.compute(stats)]
else:
results = dask.compute(stats, scheduler="synchronous")[0]
for computed_stats, op in zip(results, ops):
op.fit_finalize(computed_stats)
# Remove all the operators we processed in this phase, and remove
# from the dependencies of other ops too
for stat_op in current_phase:
stat_ops.pop(stat_op)
for dependencies in stat_ops.values():
dependencies.difference_update(current_phase)
# hack: store input/output dtypes here. We should have complete dtype
# information for each operator (like we do for column names), but as
# an interim solution this gets us what we need.
input_dtypes = dataset.to_ddf().dtypes
self.input_dtypes = dict(zip(input_dtypes.index, input_dtypes))
output_dtypes = self.transform(dataset).to_ddf().head(1).dtypes
self.output_dtypes = dict(zip(output_dtypes.index, output_dtypes))
|
https://github.com/NVIDIA/NVTabular/issues/598
|
E0224 15:58:10.330248 178 model_repository_manager.cc:963] failed to load 'amazonreview_tf' version 1: Internal: unable to create stream: the provided PTX was compiled with an unsupported toolchain.
/nvtabular/nvtabular/workflow.py:236: UserWarning: Loading workflow generated with cudf version 0+untagged.1.gbd321d1 - but we are running cudf 0.18.0a+253.g53ed28e91c. This might cause issues
warnings.warn(
E0224 15:58:20.534884 178 model_repository_manager.cc:963] failed to load 'amazonreview_nvt' version 1: Internal: Traceback (most recent call last):
File "/opt/tritonserver/backends/python/startup.py", line 197, in Init
self.backend.initialize(args)
File "/models/models/amazonreview_nvt/1/model.py", line 57, in initialize
self.output_dtypes[name] = triton_string_to_numpy(conf["data_type"])
TypeError: 'NoneType' object is not subscriptable
I0224 15:58:20.535093 178 server.cc:490]
|
TypeError
|
def main(args):
"""Multi-GPU Criteo/DLRM Preprocessing Benchmark
This benchmark is designed to measure the time required to preprocess
the Criteo (1TB) dataset for Facebook’s DLRM model. The user must specify
the path of the raw dataset (using the `--data-path` flag), as well as the
output directory for all temporary/final data (using the `--out-path` flag)
Example Usage
-------------
python dask-nvtabular-criteo-benchmark.py
--data-path /path/to/criteo_parquet --out-path /out/dir/`
Dataset Requirements (Parquet)
------------------------------
This benchmark is designed with a parquet-formatted dataset in mind.
While a CSV-formatted dataset can be processed by NVTabular, converting
to parquet will yield significantly better performance. To convert your
dataset, try using the `optimize_criteo.ipynb` notebook (also located
in `NVTabular/examples/`)
For a detailed parameter overview see `NVTabular/examples/MultiGPUBench.md`
"""
# Input
data_path = args.data_path[:-1] if args.data_path[-1] == "/" else args.data_path
freq_limit = args.freq_limit
out_files_per_proc = args.out_files_per_proc
high_card_columns = args.high_cards.split(",")
dashboard_port = args.dashboard_port
if args.protocol == "ucx":
UCX_TLS = os.environ.get("UCX_TLS", "tcp,cuda_copy,cuda_ipc,sockcm")
os.environ["UCX_TLS"] = UCX_TLS
# Cleanup output directory
base_dir = args.out_path[:-1] if args.out_path[-1] == "/" else args.out_path
dask_workdir = os.path.join(base_dir, "workdir")
output_path = os.path.join(base_dir, "output")
stats_path = os.path.join(base_dir, "stats")
setup_dirs(base_dir, dask_workdir, output_path, stats_path)
# Use Criteo dataset by default (for now)
cont_names = (
args.cont_names.split(",")
if args.cont_names
else ["I" + str(x) for x in range(1, 14)]
)
cat_names = (
args.cat_names.split(",")
if args.cat_names
else ["C" + str(x) for x in range(1, 27)]
)
label_name = ["label"]
# Specify Categorify/GroupbyStatistics options
tree_width = {}
cat_cache = {}
for col in cat_names:
if col in high_card_columns:
tree_width[col] = args.tree_width
cat_cache[col] = args.cat_cache_high
else:
tree_width[col] = 1
cat_cache[col] = args.cat_cache_low
# Use total device size to calculate args.device_limit_frac
device_size = device_mem_size(kind="total")
device_limit = int(args.device_limit_frac * device_size)
device_pool_size = int(args.device_pool_frac * device_size)
part_size = int(args.part_mem_frac * device_size)
# Parse shuffle option
shuffle = None
if args.shuffle == "PER_WORKER":
shuffle = nvt_io.Shuffle.PER_WORKER
elif args.shuffle == "PER_PARTITION":
shuffle = nvt_io.Shuffle.PER_PARTITION
# Check if any device memory is already occupied
for dev in args.devices.split(","):
fmem = _pynvml_mem_size(kind="free", index=int(dev))
used = (device_size - fmem) / 1e9
if used > 1.0:
warnings.warn(
f"BEWARE - {used} GB is already occupied on device {int(dev)}!"
)
# Setup LocalCUDACluster
if args.protocol == "tcp":
cluster = LocalCUDACluster(
protocol=args.protocol,
n_workers=args.n_workers,
CUDA_VISIBLE_DEVICES=args.devices,
device_memory_limit=device_limit,
local_directory=dask_workdir,
dashboard_address=":" + dashboard_port,
)
else:
cluster = LocalCUDACluster(
protocol=args.protocol,
n_workers=args.n_workers,
CUDA_VISIBLE_DEVICES=args.devices,
enable_nvlink=True,
device_memory_limit=device_limit,
local_directory=dask_workdir,
dashboard_address=":" + dashboard_port,
)
client = Client(cluster)
# Setup RMM pool
if args.device_pool_frac > 0.01:
setup_rmm_pool(client, device_pool_size)
# Define Dask NVTabular "Workflow"
if args.normalize:
cont_features = cont_names >> ops.FillMissing() >> ops.Normalize()
else:
cont_features = (
cont_names >> ops.FillMissing() >> ops.Clip(min_value=0) >> ops.LogOp()
)
cat_features = cat_names >> ops.Categorify(
out_path=stats_path,
tree_width=tree_width,
cat_cache=cat_cache,
freq_threshold=freq_limit,
search_sorted=not freq_limit,
on_host=not args.cats_on_device,
)
processor = Workflow(cat_features + cont_features + label_name, client=client)
dataset = Dataset(data_path, "parquet", part_size=part_size)
# Execute the dask graph
runtime = time.time()
processor.fit(dataset)
if args.profile is not None:
with performance_report(filename=args.profile):
processor.transform(dataset).to_parquet(
output_path=output_path,
num_threads=args.num_io_threads,
shuffle=shuffle,
out_files_per_proc=out_files_per_proc,
)
else:
processor.transform(dataset).to_parquet(
output_path=output_path,
num_threads=args.num_io_threads,
shuffle=shuffle,
out_files_per_proc=out_files_per_proc,
)
runtime = time.time() - runtime
print("\nDask-NVTabular DLRM/Criteo benchmark")
print("--------------------------------------")
print(f"partition size | {part_size}")
print(f"protocol | {args.protocol}")
print(f"device(s) | {args.devices}")
print(f"rmm-pool-frac | {(args.device_pool_frac)}")
print(f"out-files-per-proc | {args.out_files_per_proc}")
print(f"num_io_threads | {args.num_io_threads}")
print(f"shuffle | {args.shuffle}")
print(f"cats-on-device | {args.cats_on_device}")
print("======================================")
print(f"Runtime[s] | {runtime}")
print("======================================\n")
client.close()
|
def main(args):
"""Multi-GPU Criteo/DLRM Preprocessing Benchmark
This benchmark is designed to measure the time required to preprocess
the Criteo (1TB) dataset for Facebook’s DLRM model. The user must specify
the path of the raw dataset (using the `--data-path` flag), as well as the
output directory for all temporary/final data (using the `--out-path` flag)
Example Usage
-------------
python dask-nvtabular-criteo-benchmark.py
--data-path /path/to/criteo_parquet --out-path /out/dir/`
Dataset Requirements (Parquet)
------------------------------
This benchmark is designed with a parquet-formatted dataset in mind.
While a CSV-formatted dataset can be processed by NVTabular, converting
to parquet will yield significantly better performance. To convert your
dataset, try using the `optimize_criteo.ipynb` notebook (also located
in `NVTabular/examples/`)
For a detailed parameter overview see `NVTabular/examples/MultiGPUBench.md`
"""
# Input
data_path = args.data_path
freq_limit = args.freq_limit
out_files_per_proc = args.out_files_per_proc
high_card_columns = args.high_cards.split(",")
dashboard_port = args.dashboard_port
if args.protocol == "ucx":
UCX_TLS = os.environ.get("UCX_TLS", "tcp,cuda_copy,cuda_ipc,sockcm")
os.environ["UCX_TLS"] = UCX_TLS
# Cleanup output directory
BASE_DIR = args.out_path
dask_workdir = os.path.join(BASE_DIR, "workdir")
output_path = os.path.join(BASE_DIR, "output")
stats_path = os.path.join(BASE_DIR, "stats")
if not os.path.isdir(BASE_DIR):
os.mkdir(BASE_DIR)
for dir_path in (dask_workdir, output_path, stats_path):
if os.path.isdir(dir_path):
shutil.rmtree(dir_path)
os.mkdir(dir_path)
# Use Criteo dataset by default (for now)
cont_names = (
args.cont_names.split(",")
if args.cont_names
else ["I" + str(x) for x in range(1, 14)]
)
cat_names = (
args.cat_names.split(",")
if args.cat_names
else ["C" + str(x) for x in range(1, 27)]
)
label_name = ["label"]
# Specify Categorify/GroupbyStatistics options
tree_width = {}
cat_cache = {}
for col in cat_names:
if col in high_card_columns:
tree_width[col] = args.tree_width
cat_cache[col] = args.cat_cache_high
else:
tree_width[col] = 1
cat_cache[col] = args.cat_cache_low
# Use total device size to calculate args.device_limit_frac
device_size = device_mem_size(kind="total")
device_limit = int(args.device_limit_frac * device_size)
device_pool_size = int(args.device_pool_frac * device_size)
part_size = int(args.part_mem_frac * device_size)
# Parse shuffle option
shuffle = None
if args.shuffle == "PER_WORKER":
shuffle = nvt_io.Shuffle.PER_WORKER
elif args.shuffle == "PER_PARTITION":
shuffle = nvt_io.Shuffle.PER_PARTITION
# Check if any device memory is already occupied
for dev in args.devices.split(","):
fmem = _pynvml_mem_size(kind="free", index=int(dev))
used = (device_size - fmem) / 1e9
if used > 1.0:
warnings.warn(
f"BEWARE - {used} GB is already occupied on device {int(dev)}!"
)
# Setup LocalCUDACluster
if args.protocol == "tcp":
cluster = LocalCUDACluster(
protocol=args.protocol,
n_workers=args.n_workers,
CUDA_VISIBLE_DEVICES=args.devices,
device_memory_limit=device_limit,
local_directory=dask_workdir,
dashboard_address=":" + dashboard_port,
)
else:
cluster = LocalCUDACluster(
protocol=args.protocol,
n_workers=args.n_workers,
CUDA_VISIBLE_DEVICES=args.devices,
enable_nvlink=True,
device_memory_limit=device_limit,
local_directory=dask_workdir,
dashboard_address=":" + dashboard_port,
)
client = Client(cluster)
# Setup RMM pool
if args.device_pool_frac > 0.01:
setup_rmm_pool(client, device_pool_size)
# Define Dask NVTabular "Workflow"
if args.normalize:
cont_features = cont_names >> ops.FillMissing() >> ops.Normalize()
else:
cont_features = (
cont_names >> ops.FillMissing() >> ops.Clip(min_value=0) >> ops.LogOp()
)
cat_features = cat_names >> ops.Categorify(
out_path=stats_path,
tree_width=tree_width,
cat_cache=cat_cache,
freq_threshold=freq_limit,
search_sorted=not freq_limit,
on_host=not args.cats_on_device,
)
processor = Workflow(cat_features + cont_features + label_name, client=client)
dataset = Dataset(data_path, "parquet", part_size=part_size)
# Execute the dask graph
runtime = time.time()
processor.fit(dataset)
if args.profile is not None:
with performance_report(filename=args.profile):
processor.transform(dataset).to_parquet(
output_path=output_path,
num_threads=args.num_io_threads,
shuffle=shuffle,
out_files_per_proc=out_files_per_proc,
)
else:
processor.transform(dataset).to_parquet(
output_path=output_path,
num_threads=args.num_io_threads,
shuffle=shuffle,
out_files_per_proc=out_files_per_proc,
)
runtime = time.time() - runtime
print("\nDask-NVTabular DLRM/Criteo benchmark")
print("--------------------------------------")
print(f"partition size | {part_size}")
print(f"protocol | {args.protocol}")
print(f"device(s) | {args.devices}")
print(f"rmm-pool-frac | {(args.device_pool_frac)}")
print(f"out-files-per-proc | {args.out_files_per_proc}")
print(f"num_io_threads | {args.num_io_threads}")
print(f"shuffle | {args.shuffle}")
print(f"cats-on-device | {args.cats_on_device}")
print("======================================")
print(f"Runtime[s] | {runtime}")
print("======================================\n")
client.close()
|
https://github.com/NVIDIA/NVTabular/issues/557
|
(rapids) root@dafff4b22f48:/nvtabular# python examples/dask-nvtabular-criteo-benchmark.py -d 0,1,2,3,4,5,6,7 --data-path gs://merlin-datasets/crit_int_pq --out-path gs://merlin-datasets/output --freq-limit 0 --part-mem-frac 0.12 --device-limit-f
rac 0.7 --device-pool-frac 0.8
distributed.worker - WARNING - Compute Failed
Function: subgraph_callable
args: ( label I1 I2 I3 I4 I5 I6 I7 I8 I9 ... C17 C18 C19 C20 C21 C22 C23 C24 C25 C26
0 0 2.772589 5.808143 1.609438 2.397895 3.332205 0.000000 1.098612 4.442651 0.000000 ... -771205462 -1206449222 -864387787 359448199 -1761877609 357969245 -740331133 44548210 -842849922 -507617550
1 0 0.000000 5.147494 1.945910 4.584968 1.098612 0.000000 0.000000 4.394449 2.302585 ... <NA> -1206449222 -1793932789 <NA> <NA> <NA> <NA> -1441487878 809724924 -1775758394
2 0 2.639057 0.693147 0.693147 2.890372 0.000000 1.791759 0.000000 2.302585 2.833213 ... 1966974451 -1578429167 -1264946531 -2019528747 870435994 -322370806 -1701803791 2093085390 809724924 -317696227
3 0 4.110874 7.392032 0.693147 5.937536 3.610918 0.000
kwargs: {}
Exception: FileNotFoundError(2, 'No such file or directory')
Traceback (most recent call last):
File "examples/dask-nvtabular-criteo-benchmark.py", line 373, in <module>
main(parse_args())
File "examples/dask-nvtabular-criteo-benchmark.py", line 195, in main
output_path=output_path,
File "/nvtabular/nvtabular/workflow.py", line 876, in apply
dtypes=dtypes,
File "/nvtabular/nvtabular/workflow.py", line 991, in build_and_process_graph
num_threads=num_io_threads,
File "/nvtabular/nvtabular/workflow.py", line 1080, in ddf_to_dataset
num_threads,
File "/nvtabular/nvtabular/io/dask.py", line 110, in _ddf_to_dataset
out = client.compute(out).result()
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/distributed/client.py", line 225, in result
raise exc.with_traceback(tb)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/optimization.py", line 961, in __call__
return core.get(self.dsk, self.outkey, dict(zip(self.inkeys, args)))
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 151, in get
result = _execute_task(task, cache)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 121, in _execute_task
return func(*(_execute_task(a, cache) for a in args))
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/utils.py", line 29, in apply
return func(*args, **kwargs)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/core.py", line 5298, in apply_and_enforce
df = func(*args, **kwargs)
File "/nvtabular/nvtabular/workflow.py", line 723, in _aggregated_op
gdf = logic(gdf, columns_ctx, cols_grp, target_cols, stats_context)
File "/opt/conda/envs/rapids/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/nvtabular/nvtabular/ops/categorify.py", line 365, in apply_op
cat_names=cat_names,
File "/nvtabular/nvtabular/ops/categorify.py", line 871, in _encode
cache, path, columns=selection_r, cache=cat_cache, cats_only=True
File "/nvtabular/nvtabular/worker.py", line 84, in fetch_table_data
with open(path, "rb") as f:
FileNotFoundError: [Errno 2] No such file or directory: 'gs://merlin-datasets/output/stats/categories/unique.C1.parquet'
|
FileNotFoundError
|
def __init__(self, out_dir, **kwargs):
super().__init__(out_dir, **kwargs)
self.data_paths = []
self.data_files = []
self.data_writers = []
self.data_bios = []
self._lock = threading.RLock()
self.pwriter = self._pwriter
self.pwriter_kwargs = {}
|
def __init__(self, out_dir, **kwargs):
super().__init__(out_dir, **kwargs)
self.data_paths = []
self.data_writers = []
self.data_bios = []
self._lock = threading.RLock()
self.pwriter = self._pwriter
self.pwriter_kwargs = {}
|
https://github.com/NVIDIA/NVTabular/issues/557
|
(rapids) root@dafff4b22f48:/nvtabular# python examples/dask-nvtabular-criteo-benchmark.py -d 0,1,2,3,4,5,6,7 --data-path gs://merlin-datasets/crit_int_pq --out-path gs://merlin-datasets/output --freq-limit 0 --part-mem-frac 0.12 --device-limit-f
rac 0.7 --device-pool-frac 0.8
distributed.worker - WARNING - Compute Failed
Function: subgraph_callable
args: ( label I1 I2 I3 I4 I5 I6 I7 I8 I9 ... C17 C18 C19 C20 C21 C22 C23 C24 C25 C26
0 0 2.772589 5.808143 1.609438 2.397895 3.332205 0.000000 1.098612 4.442651 0.000000 ... -771205462 -1206449222 -864387787 359448199 -1761877609 357969245 -740331133 44548210 -842849922 -507617550
1 0 0.000000 5.147494 1.945910 4.584968 1.098612 0.000000 0.000000 4.394449 2.302585 ... <NA> -1206449222 -1793932789 <NA> <NA> <NA> <NA> -1441487878 809724924 -1775758394
2 0 2.639057 0.693147 0.693147 2.890372 0.000000 1.791759 0.000000 2.302585 2.833213 ... 1966974451 -1578429167 -1264946531 -2019528747 870435994 -322370806 -1701803791 2093085390 809724924 -317696227
3 0 4.110874 7.392032 0.693147 5.937536 3.610918 0.000
kwargs: {}
Exception: FileNotFoundError(2, 'No such file or directory')
Traceback (most recent call last):
File "examples/dask-nvtabular-criteo-benchmark.py", line 373, in <module>
main(parse_args())
File "examples/dask-nvtabular-criteo-benchmark.py", line 195, in main
output_path=output_path,
File "/nvtabular/nvtabular/workflow.py", line 876, in apply
dtypes=dtypes,
File "/nvtabular/nvtabular/workflow.py", line 991, in build_and_process_graph
num_threads=num_io_threads,
File "/nvtabular/nvtabular/workflow.py", line 1080, in ddf_to_dataset
num_threads,
File "/nvtabular/nvtabular/io/dask.py", line 110, in _ddf_to_dataset
out = client.compute(out).result()
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/distributed/client.py", line 225, in result
raise exc.with_traceback(tb)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/optimization.py", line 961, in __call__
return core.get(self.dsk, self.outkey, dict(zip(self.inkeys, args)))
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 151, in get
result = _execute_task(task, cache)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 121, in _execute_task
return func(*(_execute_task(a, cache) for a in args))
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/utils.py", line 29, in apply
return func(*args, **kwargs)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/core.py", line 5298, in apply_and_enforce
df = func(*args, **kwargs)
File "/nvtabular/nvtabular/workflow.py", line 723, in _aggregated_op
gdf = logic(gdf, columns_ctx, cols_grp, target_cols, stats_context)
File "/opt/conda/envs/rapids/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/nvtabular/nvtabular/ops/categorify.py", line 365, in apply_op
cat_names=cat_names,
File "/nvtabular/nvtabular/ops/categorify.py", line 871, in _encode
cache, path, columns=selection_r, cache=cat_cache, cats_only=True
File "/nvtabular/nvtabular/worker.py", line 84, in fetch_table_data
with open(path, "rb") as f:
FileNotFoundError: [Errno 2] No such file or directory: 'gs://merlin-datasets/output/stats/categories/unique.C1.parquet'
|
FileNotFoundError
|
def _append_writer(self, path, schema=None, add_args=None, add_kwargs=None):
# Add additional args and kwargs
_args = add_args or []
_kwargs = tlz.merge(self.pwriter_kwargs, add_kwargs or {})
if self.bytes_io:
bio = BytesIO()
self.data_bios.append(bio)
self.data_writers.append(self.pwriter(bio, *_args, **_kwargs))
else:
f = fsspec.open(path, mode="wb").open()
self.data_files.append(f)
self.data_writers.append(self.pwriter(f, *_args, **_kwargs))
|
def _append_writer(self, path, schema=None, add_args=None, add_kwargs=None):
# Add additional args and kwargs
_args = add_args or []
_kwargs = tlz.merge(self.pwriter_kwargs, add_kwargs or {})
if self.bytes_io:
bio = BytesIO()
self.data_bios.append(bio)
self.data_writers.append(self.pwriter(bio, *_args, **_kwargs))
else:
self.data_writers.append(self.pwriter(path, *_args, **_kwargs))
|
https://github.com/NVIDIA/NVTabular/issues/557
|
(rapids) root@dafff4b22f48:/nvtabular# python examples/dask-nvtabular-criteo-benchmark.py -d 0,1,2,3,4,5,6,7 --data-path gs://merlin-datasets/crit_int_pq --out-path gs://merlin-datasets/output --freq-limit 0 --part-mem-frac 0.12 --device-limit-f
rac 0.7 --device-pool-frac 0.8
distributed.worker - WARNING - Compute Failed
Function: subgraph_callable
args: ( label I1 I2 I3 I4 I5 I6 I7 I8 I9 ... C17 C18 C19 C20 C21 C22 C23 C24 C25 C26
0 0 2.772589 5.808143 1.609438 2.397895 3.332205 0.000000 1.098612 4.442651 0.000000 ... -771205462 -1206449222 -864387787 359448199 -1761877609 357969245 -740331133 44548210 -842849922 -507617550
1 0 0.000000 5.147494 1.945910 4.584968 1.098612 0.000000 0.000000 4.394449 2.302585 ... <NA> -1206449222 -1793932789 <NA> <NA> <NA> <NA> -1441487878 809724924 -1775758394
2 0 2.639057 0.693147 0.693147 2.890372 0.000000 1.791759 0.000000 2.302585 2.833213 ... 1966974451 -1578429167 -1264946531 -2019528747 870435994 -322370806 -1701803791 2093085390 809724924 -317696227
3 0 4.110874 7.392032 0.693147 5.937536 3.610918 0.000
kwargs: {}
Exception: FileNotFoundError(2, 'No such file or directory')
Traceback (most recent call last):
File "examples/dask-nvtabular-criteo-benchmark.py", line 373, in <module>
main(parse_args())
File "examples/dask-nvtabular-criteo-benchmark.py", line 195, in main
output_path=output_path,
File "/nvtabular/nvtabular/workflow.py", line 876, in apply
dtypes=dtypes,
File "/nvtabular/nvtabular/workflow.py", line 991, in build_and_process_graph
num_threads=num_io_threads,
File "/nvtabular/nvtabular/workflow.py", line 1080, in ddf_to_dataset
num_threads,
File "/nvtabular/nvtabular/io/dask.py", line 110, in _ddf_to_dataset
out = client.compute(out).result()
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/distributed/client.py", line 225, in result
raise exc.with_traceback(tb)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/optimization.py", line 961, in __call__
return core.get(self.dsk, self.outkey, dict(zip(self.inkeys, args)))
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 151, in get
result = _execute_task(task, cache)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 121, in _execute_task
return func(*(_execute_task(a, cache) for a in args))
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/utils.py", line 29, in apply
return func(*args, **kwargs)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/core.py", line 5298, in apply_and_enforce
df = func(*args, **kwargs)
File "/nvtabular/nvtabular/workflow.py", line 723, in _aggregated_op
gdf = logic(gdf, columns_ctx, cols_grp, target_cols, stats_context)
File "/opt/conda/envs/rapids/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/nvtabular/nvtabular/ops/categorify.py", line 365, in apply_op
cat_names=cat_names,
File "/nvtabular/nvtabular/ops/categorify.py", line 871, in _encode
cache, path, columns=selection_r, cache=cat_cache, cats_only=True
File "/nvtabular/nvtabular/worker.py", line 84, in fetch_table_data
with open(path, "rb") as f:
FileNotFoundError: [Errno 2] No such file or directory: 'gs://merlin-datasets/output/stats/categories/unique.C1.parquet'
|
FileNotFoundError
|
def _close_writers(self):
md_dict = {}
for writer, path in zip(self.data_writers, self.data_paths):
fn = path.split(self.fs.sep)[-1]
md_dict[fn] = writer.close(metadata_file_path=fn)
for f in self.data_files:
f.close()
return md_dict
|
def _close_writers(self):
md_dict = {}
for writer, path in zip(self.data_writers, self.data_paths):
fn = path.split(self.fs.sep)[-1]
md_dict[fn] = writer.close(metadata_file_path=fn)
return md_dict
|
https://github.com/NVIDIA/NVTabular/issues/557
|
(rapids) root@dafff4b22f48:/nvtabular# python examples/dask-nvtabular-criteo-benchmark.py -d 0,1,2,3,4,5,6,7 --data-path gs://merlin-datasets/crit_int_pq --out-path gs://merlin-datasets/output --freq-limit 0 --part-mem-frac 0.12 --device-limit-f
rac 0.7 --device-pool-frac 0.8
distributed.worker - WARNING - Compute Failed
Function: subgraph_callable
args: ( label I1 I2 I3 I4 I5 I6 I7 I8 I9 ... C17 C18 C19 C20 C21 C22 C23 C24 C25 C26
0 0 2.772589 5.808143 1.609438 2.397895 3.332205 0.000000 1.098612 4.442651 0.000000 ... -771205462 -1206449222 -864387787 359448199 -1761877609 357969245 -740331133 44548210 -842849922 -507617550
1 0 0.000000 5.147494 1.945910 4.584968 1.098612 0.000000 0.000000 4.394449 2.302585 ... <NA> -1206449222 -1793932789 <NA> <NA> <NA> <NA> -1441487878 809724924 -1775758394
2 0 2.639057 0.693147 0.693147 2.890372 0.000000 1.791759 0.000000 2.302585 2.833213 ... 1966974451 -1578429167 -1264946531 -2019528747 870435994 -322370806 -1701803791 2093085390 809724924 -317696227
3 0 4.110874 7.392032 0.693147 5.937536 3.610918 0.000
kwargs: {}
Exception: FileNotFoundError(2, 'No such file or directory')
Traceback (most recent call last):
File "examples/dask-nvtabular-criteo-benchmark.py", line 373, in <module>
main(parse_args())
File "examples/dask-nvtabular-criteo-benchmark.py", line 195, in main
output_path=output_path,
File "/nvtabular/nvtabular/workflow.py", line 876, in apply
dtypes=dtypes,
File "/nvtabular/nvtabular/workflow.py", line 991, in build_and_process_graph
num_threads=num_io_threads,
File "/nvtabular/nvtabular/workflow.py", line 1080, in ddf_to_dataset
num_threads,
File "/nvtabular/nvtabular/io/dask.py", line 110, in _ddf_to_dataset
out = client.compute(out).result()
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/distributed/client.py", line 225, in result
raise exc.with_traceback(tb)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/optimization.py", line 961, in __call__
return core.get(self.dsk, self.outkey, dict(zip(self.inkeys, args)))
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 151, in get
result = _execute_task(task, cache)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 121, in _execute_task
return func(*(_execute_task(a, cache) for a in args))
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/utils.py", line 29, in apply
return func(*args, **kwargs)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/core.py", line 5298, in apply_and_enforce
df = func(*args, **kwargs)
File "/nvtabular/nvtabular/workflow.py", line 723, in _aggregated_op
gdf = logic(gdf, columns_ctx, cols_grp, target_cols, stats_context)
File "/opt/conda/envs/rapids/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/nvtabular/nvtabular/ops/categorify.py", line 365, in apply_op
cat_names=cat_names,
File "/nvtabular/nvtabular/ops/categorify.py", line 871, in _encode
cache, path, columns=selection_r, cache=cat_cache, cats_only=True
File "/nvtabular/nvtabular/worker.py", line 84, in fetch_table_data
with open(path, "rb") as f:
FileNotFoundError: [Errno 2] No such file or directory: 'gs://merlin-datasets/output/stats/categories/unique.C1.parquet'
|
FileNotFoundError
|
def fetch_table_data(
table_cache,
path,
cache="disk",
cats_only=False,
reader=None,
columns=None,
**kwargs,
):
"""Utility to retrieve a cudf DataFrame from a cache (and add the
DataFrame to a cache if the element is missing). Note that `cats_only=True`
results in optimized logic for the `Categorify` transformation.
"""
table = table_cache.get(path, None)
if table and not isinstance(table, cudf.DataFrame):
if not cats_only:
return cudf.io.read_parquet(table, index=False)
df = cudf.io.read_parquet(table, index=False, columns=columns)
df.index.name = "labels"
df.reset_index(drop=False, inplace=True)
return df
reader = reader or cudf.io.read_parquet
if table is None:
if cache in ("device", "disk"):
table = reader(path, index=False, columns=columns, **kwargs)
elif cache == "host":
if reader == cudf.io.read_parquet:
# If the file is already in parquet format,
# we can just move the same bytes to host memory
with fsspec.open(path, "rb") as f:
table_cache[path] = BytesIO(f.read())
table = reader(
table_cache[path], index=False, columns=columns, **kwargs
)
else:
# Otherwise, we should convert the format to parquet
table = reader(path, index=False, columns=columns, **kwargs)
table_cache[path] = BytesIO()
table.to_parquet(table_cache[path])
if cats_only:
table.index.name = "labels"
table.reset_index(drop=False, inplace=True)
if cache == "device":
table_cache[path] = table.copy(deep=False)
return table
|
def fetch_table_data(
table_cache,
path,
cache="disk",
cats_only=False,
reader=None,
columns=None,
**kwargs,
):
"""Utility to retrieve a cudf DataFrame from a cache (and add the
DataFrame to a cache if the element is missing). Note that `cats_only=True`
results in optimized logic for the `Categorify` transformation.
"""
table = table_cache.get(path, None)
if table and not isinstance(table, cudf.DataFrame):
if not cats_only:
return cudf.io.read_parquet(table, index=False)
df = cudf.io.read_parquet(table, index=False, columns=columns)
df.index.name = "labels"
df.reset_index(drop=False, inplace=True)
return df
reader = reader or cudf.io.read_parquet
if table is None:
if cache in ("device", "disk"):
table = reader(path, index=False, columns=columns, **kwargs)
elif cache == "host":
if reader == cudf.io.read_parquet:
# If the file is already in parquet format,
# we can just move the same bytes to host memory
with open(path, "rb") as f:
table_cache[path] = BytesIO(f.read())
table = reader(
table_cache[path], index=False, columns=columns, **kwargs
)
else:
# Otherwise, we should convert the format to parquet
table = reader(path, index=False, columns=columns, **kwargs)
table_cache[path] = BytesIO()
table.to_parquet(table_cache[path])
if cats_only:
table.index.name = "labels"
table.reset_index(drop=False, inplace=True)
if cache == "device":
table_cache[path] = table.copy(deep=False)
return table
|
https://github.com/NVIDIA/NVTabular/issues/557
|
(rapids) root@dafff4b22f48:/nvtabular# python examples/dask-nvtabular-criteo-benchmark.py -d 0,1,2,3,4,5,6,7 --data-path gs://merlin-datasets/crit_int_pq --out-path gs://merlin-datasets/output --freq-limit 0 --part-mem-frac 0.12 --device-limit-f
rac 0.7 --device-pool-frac 0.8
distributed.worker - WARNING - Compute Failed
Function: subgraph_callable
args: ( label I1 I2 I3 I4 I5 I6 I7 I8 I9 ... C17 C18 C19 C20 C21 C22 C23 C24 C25 C26
0 0 2.772589 5.808143 1.609438 2.397895 3.332205 0.000000 1.098612 4.442651 0.000000 ... -771205462 -1206449222 -864387787 359448199 -1761877609 357969245 -740331133 44548210 -842849922 -507617550
1 0 0.000000 5.147494 1.945910 4.584968 1.098612 0.000000 0.000000 4.394449 2.302585 ... <NA> -1206449222 -1793932789 <NA> <NA> <NA> <NA> -1441487878 809724924 -1775758394
2 0 2.639057 0.693147 0.693147 2.890372 0.000000 1.791759 0.000000 2.302585 2.833213 ... 1966974451 -1578429167 -1264946531 -2019528747 870435994 -322370806 -1701803791 2093085390 809724924 -317696227
3 0 4.110874 7.392032 0.693147 5.937536 3.610918 0.000
kwargs: {}
Exception: FileNotFoundError(2, 'No such file or directory')
Traceback (most recent call last):
File "examples/dask-nvtabular-criteo-benchmark.py", line 373, in <module>
main(parse_args())
File "examples/dask-nvtabular-criteo-benchmark.py", line 195, in main
output_path=output_path,
File "/nvtabular/nvtabular/workflow.py", line 876, in apply
dtypes=dtypes,
File "/nvtabular/nvtabular/workflow.py", line 991, in build_and_process_graph
num_threads=num_io_threads,
File "/nvtabular/nvtabular/workflow.py", line 1080, in ddf_to_dataset
num_threads,
File "/nvtabular/nvtabular/io/dask.py", line 110, in _ddf_to_dataset
out = client.compute(out).result()
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/distributed/client.py", line 225, in result
raise exc.with_traceback(tb)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/optimization.py", line 961, in __call__
return core.get(self.dsk, self.outkey, dict(zip(self.inkeys, args)))
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 151, in get
result = _execute_task(task, cache)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/core.py", line 121, in _execute_task
return func(*(_execute_task(a, cache) for a in args))
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/utils.py", line 29, in apply
return func(*args, **kwargs)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/core.py", line 5298, in apply_and_enforce
df = func(*args, **kwargs)
File "/nvtabular/nvtabular/workflow.py", line 723, in _aggregated_op
gdf = logic(gdf, columns_ctx, cols_grp, target_cols, stats_context)
File "/opt/conda/envs/rapids/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/nvtabular/nvtabular/ops/categorify.py", line 365, in apply_op
cat_names=cat_names,
File "/nvtabular/nvtabular/ops/categorify.py", line 871, in _encode
cache, path, columns=selection_r, cache=cat_cache, cats_only=True
File "/nvtabular/nvtabular/worker.py", line 84, in fetch_table_data
with open(path, "rb") as f:
FileNotFoundError: [Errno 2] No such file or directory: 'gs://merlin-datasets/output/stats/categories/unique.C1.parquet'
|
FileNotFoundError
|
def _chunkwise_moments(df):
df2 = cudf.DataFrame()
for col in df.columns:
df2[col] = df[col].astype("float64").pow(2)
vals = {
"df-count": df.count().to_frame().transpose(),
"df-sum": df.sum().astype("float64").to_frame().transpose(),
"df2-sum": df2.sum().to_frame().transpose(),
}
# NOTE: Perhaps we should convert to pandas here
# (since we know the results should be small)?
del df2
return vals
|
def _chunkwise_moments(df):
df2 = cudf.DataFrame()
for col in df.columns:
df2[col] = df[col].astype("float64").pow(2)
vals = {
"df-count": df.count().to_frame().transpose(),
"df-sum": df.sum().to_frame().transpose(),
"df2-sum": df2.sum().to_frame().transpose(),
}
# NOTE: Perhaps we should convert to pandas here
# (since we know the results should be small)?
del df2
return vals
|
https://github.com/NVIDIA/NVTabular/issues/432
|
/opt/conda/envs/rapids/lib/python3.7/site-packages/pandas/core/series.py:726: RuntimeWarning: invalid value encountered in sqrt
result = getattr(ufunc, method)(*inputs, **kwargs)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<timed eval> in <module>
/nvtabular0.3/NVTabular/nvtabular/workflow.py in apply(self, dataset, apply_offline, record_stats, shuffle, output_path, output_format, out_files_per_proc, num_io_threads, dtypes)
869 out_files_per_proc=out_files_per_proc,
870 num_io_threads=num_io_threads,
--> 871 dtypes=dtypes,
872 )
873 else:
/nvtabular0.3/NVTabular/nvtabular/workflow.py in build_and_process_graph(self, dataset, end_phase, output_path, record_stats, shuffle, output_format, out_files_per_proc, apply_ops, num_io_threads, dtypes)
968 self._base_phase = 0 # Set _base_phase
969 for idx, _ in enumerate(self.phases[:end]):
--> 970 self.exec_phase(idx, record_stats=record_stats, update_ddf=(idx == (end - 1)))
971 self._base_phase = 0 # Re-Set _base_phase
972
/nvtabular0.3/NVTabular/nvtabular/workflow.py in exec_phase(self, phase_index, record_stats, update_ddf)
755 _ddf = self.get_ddf()
756 if transforms:
--> 757 _ddf = self._aggregated_dask_transform(_ddf, transforms)
758
759 stats = []
/nvtabular0.3/NVTabular/nvtabular/workflow.py in _aggregated_dask_transform(self, ddf, transforms)
724 for transform in transforms:
725 columns_ctx, cols_grp, target_cols, logic, stats_context = transform
--> 726 meta = logic(meta, columns_ctx, cols_grp, target_cols, stats_context)
727 return ddf.map_partitions(self.__class__._aggregated_op, transforms, meta=meta)
728
/nvtabular0.3/NVTabular/nvtabular/ops/transform_operator.py in apply_op(self, gdf, columns_ctx, input_cols, target_cols, stats_context)
89 new_gdf = self.op_logic(gdf, target_columns, stats_context=stats_context)
90 self.update_columns_ctx(columns_ctx, input_cols, new_gdf.columns, target_columns)
---> 91 return self.assemble_new_df(gdf, new_gdf, target_columns)
92
93 def assemble_new_df(self, origin_gdf, new_gdf, target_columns):
/nvtabular0.3/NVTabular/nvtabular/ops/transform_operator.py in assemble_new_df(self, origin_gdf, new_gdf, target_columns)
96 return new_gdf
97 else:
---> 98 origin_gdf[target_columns] = new_gdf
99 return origin_gdf
100 return cudf.concat([origin_gdf, new_gdf], axis=1)
/opt/conda/envs/rapids/lib/python3.7/contextlib.py in inner(*args, **kwds)
72 def inner(*args, **kwds):
73 with self._recreate_cm():
---> 74 return func(*args, **kwds)
75 return inner
76
/opt/conda/envs/rapids/lib/python3.7/site-packages/cudf/core/dataframe.py in __setitem__(self, arg, value)
777 replace_df=value,
778 input_cols=arg,
--> 779 mask=None,
780 )
781 else:
/opt/conda/envs/rapids/lib/python3.7/site-packages/cudf/core/dataframe.py in _setitem_with_dataframe(input_df, replace_df, input_cols, mask)
7266 if len(input_cols) != len(replace_df.columns):
7267 raise ValueError(
-> 7268 "Number of Input Columns must be same replacement Dataframe"
7269 )
7270
ValueError: Number of Input Columns must be same replacement Dataframe
|
ValueError
|
def to_ddf(self, columns=None):
return dask_cudf.read_parquet(
self.paths,
columns=columns,
# can't omit reading the index in if we aren't being passed columns
index=None if columns is None else False,
gather_statistics=False,
split_row_groups=self.row_groups_per_part,
storage_options=self.storage_options,
)
|
def to_ddf(self, columns=None):
return dask_cudf.read_parquet(
self.paths,
columns=columns,
index=False,
gather_statistics=False,
split_row_groups=self.row_groups_per_part,
storage_options=self.storage_options,
)
|
https://github.com/NVIDIA/NVTabular/issues/409
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-13-b133e2b51cbf> in <module>
2 valid_dataset = nvt.Dataset(OUTPUT_BUCKET_FOLDER+'valid_gdf.parquet', part_mem_fraction=0.12)
3
----> 4 workflow.apply(train_dataset, record_stats=True, output_path=output_train_dir, shuffle=True, out_files_per_proc=5)
5 workflow.apply(valid_dataset, record_stats=False, output_path=output_valid_dir, shuffle=False, out_files_per_proc=5)
/rapids/notebooks/benf/NVTabular/nvtabular/workflow.py in apply(self, dataset, apply_offline, record_stats, shuffle, output_path, output_format, out_files_per_proc, num_io_threads, dtypes)
782 out_files_per_proc=out_files_per_proc,
783 num_io_threads=num_io_threads,
--> 784 dtypes=dtypes,
785 )
786 else:
/rapids/notebooks/benf/NVTabular/nvtabular/workflow.py in build_and_process_graph(self, dataset, end_phase, output_path, record_stats, shuffle, output_format, out_files_per_proc, apply_ops, num_io_threads, dtypes)
885 self._base_phase = 0 # Set _base_phase
886 for idx, _ in enumerate(self.phases[:end]):
--> 887 self.exec_phase(idx, record_stats=record_stats, update_ddf=(idx == (end - 1)))
888 self._base_phase = 0 # Re-Set _base_phase
889
/rapids/notebooks/benf/NVTabular/nvtabular/workflow.py in exec_phase(self, phase_index, record_stats, update_ddf)
631
632 # Perform transforms as single dask task (per ddf partition)
--> 633 _ddf = self.get_ddf()
634 if transforms:
635 _ddf = self._aggregated_dask_transform(_ddf, transforms)
/rapids/notebooks/benf/NVTabular/nvtabular/workflow.py in get_ddf(self)
587 elif isinstance(self.ddf, Dataset):
588 columns = self.columns_ctx["all"]["base"]
--> 589 return self.ddf.to_ddf(columns=columns, shuffle=self._shuffle_parts)
590 return self.ddf
591
/rapids/notebooks/benf/NVTabular/nvtabular/io/dataset.py in to_ddf(self, columns, shuffle, seed)
263 """
264 # Use DatasetEngine to create ddf
--> 265 ddf = self.engine.to_ddf(columns=columns)
266
267 # Shuffle the partitions of ddf (optional)
/rapids/notebooks/benf/NVTabular/nvtabular/io/parquet.py in to_ddf(self, columns)
102 gather_statistics=False,
103 split_row_groups=self.row_groups_per_part,
--> 104 storage_options=self.storage_options,
105 )
106
/opt/conda/envs/rapids/lib/python3.7/site-packages/dask_cudf/io/parquet.py in read_parquet(path, columns, split_row_groups, row_groups_per_part, **kwargs)
192 split_row_groups=split_row_groups,
193 engine=CudfEngine,
--> 194 **kwargs,
195 )
196
/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/io/parquet/core.py in read_parquet(path, columns, filters, categories, index, storage_options, engine, gather_statistics, split_row_groups, chunksize, **kwargs)
248 # Modify `meta` dataframe accordingly
249 meta, index, columns = set_index_columns(
--> 250 meta, index, columns, index_in_columns, auto_index_allowed
251 )
252 if meta.index.name == NONE_LABEL:
/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/io/parquet/core.py in set_index_columns(meta, index, columns, index_in_columns, auto_index_allowed)
771 "The following columns were not found in the dataset %s\n"
772 "The following columns were found %s"
--> 773 % (set(columns) - set(meta.columns), meta.columns)
774 )
775
ValueError: The following columns were not found in the dataset {'document_id_promo_count', 'publish_time_days_since_published', 'campaign_id_clicked_sum_ctr', 'ad_id_count', 'ad_id_clicked_sum_ctr', 'source_id_clicked_sum_ctr', 'publish_time_promo_days_since_published', 'advertiser_id_clicked_sum_ctr', 'document_id_promo_clicked_sum_ctr', 'publisher_id_clicked_sum_ctr', 'geo_location_country', 'geo_location_state'}
The following columns were found Index(['display_id', 'ad_id', 'clicked', 'uuid', 'document_id', 'timestamp',
'platform', 'geo_location', 'document_id_promo', 'campaign_id',
'advertiser_id', 'source_id', 'publisher_id', 'publish_time',
'source_id_promo', 'publisher_id_promo', 'publish_time_promo',
'day_event'],
dtype='object')
|
ValueError
|
def get_ddf(self):
if self.ddf is None:
raise ValueError("No dask_cudf frame available.")
elif isinstance(self.ddf, Dataset):
# Right now we can't distinguish between input columns and generated columns
# in the dataset, we don't limit the columm set right now in the to_ddf call
# (https://github.com/NVIDIA/NVTabular/issues/409 )
return self.ddf.to_ddf(shuffle=self._shuffle_parts)
return self.ddf
|
def get_ddf(self):
if self.ddf is None:
raise ValueError("No dask_cudf frame available.")
elif isinstance(self.ddf, Dataset):
columns = self.columns_ctx["all"]["base"]
return self.ddf.to_ddf(columns=columns, shuffle=self._shuffle_parts)
return self.ddf
|
https://github.com/NVIDIA/NVTabular/issues/409
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-13-b133e2b51cbf> in <module>
2 valid_dataset = nvt.Dataset(OUTPUT_BUCKET_FOLDER+'valid_gdf.parquet', part_mem_fraction=0.12)
3
----> 4 workflow.apply(train_dataset, record_stats=True, output_path=output_train_dir, shuffle=True, out_files_per_proc=5)
5 workflow.apply(valid_dataset, record_stats=False, output_path=output_valid_dir, shuffle=False, out_files_per_proc=5)
/rapids/notebooks/benf/NVTabular/nvtabular/workflow.py in apply(self, dataset, apply_offline, record_stats, shuffle, output_path, output_format, out_files_per_proc, num_io_threads, dtypes)
782 out_files_per_proc=out_files_per_proc,
783 num_io_threads=num_io_threads,
--> 784 dtypes=dtypes,
785 )
786 else:
/rapids/notebooks/benf/NVTabular/nvtabular/workflow.py in build_and_process_graph(self, dataset, end_phase, output_path, record_stats, shuffle, output_format, out_files_per_proc, apply_ops, num_io_threads, dtypes)
885 self._base_phase = 0 # Set _base_phase
886 for idx, _ in enumerate(self.phases[:end]):
--> 887 self.exec_phase(idx, record_stats=record_stats, update_ddf=(idx == (end - 1)))
888 self._base_phase = 0 # Re-Set _base_phase
889
/rapids/notebooks/benf/NVTabular/nvtabular/workflow.py in exec_phase(self, phase_index, record_stats, update_ddf)
631
632 # Perform transforms as single dask task (per ddf partition)
--> 633 _ddf = self.get_ddf()
634 if transforms:
635 _ddf = self._aggregated_dask_transform(_ddf, transforms)
/rapids/notebooks/benf/NVTabular/nvtabular/workflow.py in get_ddf(self)
587 elif isinstance(self.ddf, Dataset):
588 columns = self.columns_ctx["all"]["base"]
--> 589 return self.ddf.to_ddf(columns=columns, shuffle=self._shuffle_parts)
590 return self.ddf
591
/rapids/notebooks/benf/NVTabular/nvtabular/io/dataset.py in to_ddf(self, columns, shuffle, seed)
263 """
264 # Use DatasetEngine to create ddf
--> 265 ddf = self.engine.to_ddf(columns=columns)
266
267 # Shuffle the partitions of ddf (optional)
/rapids/notebooks/benf/NVTabular/nvtabular/io/parquet.py in to_ddf(self, columns)
102 gather_statistics=False,
103 split_row_groups=self.row_groups_per_part,
--> 104 storage_options=self.storage_options,
105 )
106
/opt/conda/envs/rapids/lib/python3.7/site-packages/dask_cudf/io/parquet.py in read_parquet(path, columns, split_row_groups, row_groups_per_part, **kwargs)
192 split_row_groups=split_row_groups,
193 engine=CudfEngine,
--> 194 **kwargs,
195 )
196
/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/io/parquet/core.py in read_parquet(path, columns, filters, categories, index, storage_options, engine, gather_statistics, split_row_groups, chunksize, **kwargs)
248 # Modify `meta` dataframe accordingly
249 meta, index, columns = set_index_columns(
--> 250 meta, index, columns, index_in_columns, auto_index_allowed
251 )
252 if meta.index.name == NONE_LABEL:
/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/io/parquet/core.py in set_index_columns(meta, index, columns, index_in_columns, auto_index_allowed)
771 "The following columns were not found in the dataset %s\n"
772 "The following columns were found %s"
--> 773 % (set(columns) - set(meta.columns), meta.columns)
774 )
775
ValueError: The following columns were not found in the dataset {'document_id_promo_count', 'publish_time_days_since_published', 'campaign_id_clicked_sum_ctr', 'ad_id_count', 'ad_id_clicked_sum_ctr', 'source_id_clicked_sum_ctr', 'publish_time_promo_days_since_published', 'advertiser_id_clicked_sum_ctr', 'document_id_promo_clicked_sum_ctr', 'publisher_id_clicked_sum_ctr', 'geo_location_country', 'geo_location_state'}
The following columns were found Index(['display_id', 'ad_id', 'clicked', 'uuid', 'document_id', 'timestamp',
'platform', 'geo_location', 'document_id_promo', 'campaign_id',
'advertiser_id', 'source_id', 'publisher_id', 'publish_time',
'source_id_promo', 'publisher_id_promo', 'publish_time_promo',
'day_event'],
dtype='object')
|
ValueError
|
def add_data(self, gdf):
# Populate columns idxs
if not self.col_idx:
for i, x in enumerate(gdf.columns.values):
self.col_idx[str(x)] = i
# list columns in cudf don't currently support chunked writing in parquet.
# hack around this by just writing a single file with this partition
# this restriction can be removed once cudf supports chunked writing
# in parquet
if any(is_list_dtype(gdf[col].dtype) for col in gdf.columns):
self._write_table(0, gdf, True)
return
# Generate `ind` array to map each row to an output file.
# This approach is certainly more optimized for shuffling
# than it is for non-shuffling, but using a single code
# path is probably worth the (possible) minor overhead.
nrows = gdf.shape[0]
typ = np.min_scalar_type(nrows * 2)
if self.shuffle:
ind = cp.random.choice(cp.arange(self.num_out_files, dtype=typ), nrows)
else:
ind = cp.arange(nrows, dtype=typ)
cp.floor_divide(ind, math.ceil(nrows / self.num_out_files), out=ind)
for x, group in enumerate(
gdf.scatter_by_map(ind, map_size=self.num_out_files, keep_index=False)
):
self.num_samples[x] += len(group)
if self.num_threads > 1:
self.queue.put((x, group))
else:
self._write_table(x, group)
# wait for all writes to finish before exiting
# (so that we aren't using memory)
if self.num_threads > 1:
self.queue.join()
|
def add_data(self, gdf):
# Populate columns idxs
if not self.col_idx:
for i, x in enumerate(gdf.columns.values):
self.col_idx[str(x)] = i
# list columns in cudf don't currently support chunked writing in parquet.
# hack around this by just writing a single file with this partition
# this restriction can be removed once cudf supports chunked writing
# in parquet
if any(is_list_dtype(gdf[col].dtype) for col in gdf.columns):
self._write_table(gdf, 0, True)
return
# Generate `ind` array to map each row to an output file.
# This approach is certainly more optimized for shuffling
# than it is for non-shuffling, but using a single code
# path is probably worth the (possible) minor overhead.
nrows = gdf.shape[0]
typ = np.min_scalar_type(nrows * 2)
if self.shuffle:
ind = cp.random.choice(cp.arange(self.num_out_files, dtype=typ), nrows)
else:
ind = cp.arange(nrows, dtype=typ)
cp.floor_divide(ind, math.ceil(nrows / self.num_out_files), out=ind)
for x, group in enumerate(
gdf.scatter_by_map(ind, map_size=self.num_out_files, keep_index=False)
):
self.num_samples[x] += len(group)
if self.num_threads > 1:
self.queue.put((x, group))
else:
self._write_table(x, group)
# wait for all writes to finish before exiting
# (so that we aren't using memory)
if self.num_threads > 1:
self.queue.join()
|
https://github.com/NVIDIA/NVTabular/issues/381
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-19-f93c44c3b381> in <module>
11 proc.add_preprocess(JoinExternal(df_grouped, on= ['doc_id'], on_ext= ['doc_id'], kind_ext=kind_ext, columns_ext=columns_ext, cache='device', how='left'))
12 train_dataset = nvt.Dataset(df2)
---> 13 proc.apply(train_dataset, apply_offline=True, record_stats=True, output_path='./output/', shuffle=True, out_files_per_proc=1)
~/ronaya/NVTabular/nvtabular/workflow.py in apply(self, dataset, apply_offline, record_stats, shuffle, output_path, output_format, out_files_per_proc, num_io_threads)
738 output_format=output_format,
739 out_files_per_proc=out_files_per_proc,
--> 740 num_io_threads=num_io_threads,
741 )
742 else:
~/ronaya/NVTabular/nvtabular/workflow.py in build_and_process_graph(self, dataset, end_phase, output_path, record_stats, shuffle, output_format, out_files_per_proc, apply_ops, num_io_threads)
845 shuffle=shuffle,
846 out_files_per_proc=out_files_per_proc,
--> 847 num_threads=num_io_threads,
848 )
849
~/ronaya/NVTabular/nvtabular/workflow.py in ddf_to_dataset(self, output_path, shuffle, out_files_per_proc, output_format, num_threads)
931 output_format,
932 self.client,
--> 933 num_threads,
934 )
935 return
~/ronaya/NVTabular/nvtabular/io/dask.py in _ddf_to_dataset(ddf, fs, output_path, shuffle, out_files_per_proc, cat_names, cont_names, label_names, output_format, client, num_threads)
110 out = client.compute(out).result()
111 else:
--> 112 out = dask.compute(out, scheduler="synchronous")[0]
113
114 # Follow-up Shuffling and _metadata creation
~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/base.py in compute(*args, **kwargs)
450 postcomputes.append(x.__dask_postcompute__())
451
--> 452 results = schedule(dsk, keys, **kwargs)
453 return repack([f(r, *a) for r, (f, a) in zip(results, postcomputes)])
454
~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/local.py in get_sync(dsk, keys, **kwargs)
525 """
526 kwargs.pop("num_workers", None) # if num_workers present, remove it
--> 527 return get_async(apply_sync, 1, dsk, keys, **kwargs)
528
529
~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/local.py in get_async(apply_async, num_workers, dsk, result, cache, get_id, rerun_exceptions_locally, pack_exception, raise_exception, callbacks, dumps, loads, **kwargs)
492
493 while state["ready"] and len(state["running"]) < num_workers:
--> 494 fire_task()
495
496 succeeded = True
~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/local.py in fire_task()
464 pack_exception,
465 ),
--> 466 callback=queue.put,
467 )
468
~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/local.py in apply_sync(func, args, kwds, callback)
514 def apply_sync(func, args=(), kwds={}, callback=None):
515 """ A naive synchronous version of apply_async """
--> 516 res = func(*args, **kwds)
517 if callback is not None:
518 callback(res)
~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/local.py in execute_task(key, task_info, dumps, loads, get_id, pack_exception)
225 failed = False
226 except BaseException as e:
--> 227 result = pack_exception(e, dumps)
228 failed = True
229 return key, result, failed
~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/local.py in execute_task(key, task_info, dumps, loads, get_id, pack_exception)
220 try:
221 task, data = loads(task_info)
--> 222 result = _execute_task(task, data)
223 id = get_id()
224 result = dumps((result, id))
~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/core.py in _execute_task(arg, cache, dsk)
119 # temporaries by their reference count and can execute certain
120 # operations in-place.
--> 121 return func(*(_execute_task(a, cache) for a in args))
122 elif not ishashable(arg):
123 return arg
~/miniconda3/envs/1019/lib/python3.7/contextlib.py in inner(*args, **kwds)
72 def inner(*args, **kwds):
73 with self._recreate_cm():
---> 74 return func(*args, **kwds)
75 return inner
76
~/ronaya/NVTabular/nvtabular/io/dask.py in _write_output_partition(gdf, processed_path, shuffle, out_files_per_proc, fs, cat_names, cont_names, label_names, output_format, num_threads)
61
62 # Add data
---> 63 writer.add_data(gdf)
64
65 return gdf_size
~/miniconda3/envs/1019/lib/python3.7/contextlib.py in inner(*args, **kwds)
72 def inner(*args, **kwds):
73 with self._recreate_cm():
---> 74 return func(*args, **kwds)
75 return inner
76
~/ronaya/NVTabular/nvtabular/io/writer.py in add_data(self, gdf)
125 # in parquet
126 if any(is_list_dtype(gdf[col].dtype) for col in gdf.columns):
--> 127 self._write_table(gdf, 0, True)
128 return
129
~/ronaya/NVTabular/nvtabular/io/parquet.py in _write_table(self, idx, data, has_list_column)
210 # write out a new file, rather than stream multiple chunks to a single file
211 filename = self._get_filename(len(self.data_paths))
--> 212 data.to_parquet(filename)
213 self.data_paths.append(filename)
214 else:
AttributeError: 'int' object has no attribute 'to_parquet'
|
AttributeError
|
def __init__(
self,
paths,
part_size,
storage_options,
row_groups_per_part=None,
legacy=False,
batch_size=None,
):
# TODO: Improve dask_cudf.read_parquet performance so that
# this class can be slimmed down.
super().__init__(paths, part_size, storage_options)
self.batch_size = batch_size
self._metadata, self._base = self.metadata
self._pieces = None
if row_groups_per_part is None:
file_path = self._metadata.row_group(0).column(0).file_path
path0 = (
self.fs.sep.join([self._base, file_path])
if file_path != ""
else self._base # This is a single file
)
if row_groups_per_part is None:
rg_byte_size_0 = _memory_usage(
cudf.io.read_parquet(path0, row_groups=0, row_group=0)
)
row_groups_per_part = self.part_size / rg_byte_size_0
if row_groups_per_part < 1.0:
warnings.warn(
f"Row group size {rg_byte_size_0} is bigger than requested part_size "
f"{self.part_size}"
)
row_groups_per_part = 1.0
self.row_groups_per_part = int(row_groups_per_part)
assert self.row_groups_per_part > 0
|
def __init__(
self,
paths,
part_size,
storage_options,
row_groups_per_part=None,
legacy=False,
batch_size=None,
):
# TODO: Improve dask_cudf.read_parquet performance so that
# this class can be slimmed down.
super().__init__(paths, part_size, storage_options)
self.batch_size = batch_size
self._metadata, self._base = self.metadata
self._pieces = None
if row_groups_per_part is None:
file_path = self._metadata.row_group(0).column(0).file_path
path0 = (
self.fs.sep.join([self._base, file_path])
if file_path != ""
else self._base # This is a single file
)
if row_groups_per_part is None:
rg_byte_size_0 = (
cudf.io.read_parquet(path0, row_groups=0, row_group=0)
.memory_usage(deep=True, index=True)
.sum()
)
row_groups_per_part = self.part_size / rg_byte_size_0
if row_groups_per_part < 1.0:
warnings.warn(
f"Row group size {rg_byte_size_0} is bigger than requested part_size "
f"{self.part_size}"
)
row_groups_per_part = 1.0
self.row_groups_per_part = int(row_groups_per_part)
assert self.row_groups_per_part > 0
|
https://github.com/NVIDIA/NVTabular/issues/363
|
Traceback (most recent call last):
File "main.py", line 106, in <module>
main(args)
File "main.py", line 61, in main
train_paths, engine="parquet", part_mem_fraction=float(args.gpu_mem_frac)
File "/root/miniconda/lib/python3.7/site-packages/nvtabular/io/dataset.py", line 224, in __init__
paths, part_size, storage_options=storage_options, **kwargs
File "/root/miniconda/lib/python3.7/site-packages/nvtabular/io/parquet.py", line 69, in __init__
.memory_usage(deep=True, index=True)
File "/root/miniconda/lib/python3.7/site-packages/cudf/core/dataframe.py", line 842, in memory_usage
sizes = [col._memory_usage(deep=deep) for col in self._data.columns]
File "/root/miniconda/lib/python3.7/site-packages/cudf/core/dataframe.py", line 842, in <listcomp>
sizes = [col._memory_usage(deep=deep) for col in self._data.columns]
File "/root/miniconda/lib/python3.7/site-packages/cudf/core/column/column.py", line 299, in _memory_usage
return self.__sizeof__()
File "/root/miniconda/lib/python3.7/site-packages/cudf/core/column/column.py", line 183, in __sizeof__
n = self.data.size
File "cudf/_lib/column.pyx", line 99, in cudf._lib.column.Column.data.__get__
AttributeError: 'ListDtype' object has no attribute 'itemsize'
|
AttributeError
|
def __init__(self, *args, **kwargs):
super().__init__(*args)
self._meta = {}
self.csv_kwargs = kwargs
self.names = self.csv_kwargs.get("names", None)
# CSV reader needs a list of files
# (Assume flat directory structure if this is a dir)
if len(self.paths) == 1 and self.fs.isdir(self.paths[0]):
self.paths = self.fs.glob(self.fs.sep.join([self.paths[0], "*"]))
|
def __init__(self, *args, **kwargs):
super().__init__(*args)
self._meta = {}
self.names = kwargs.pop("names", None)
self.csv_kwargs = kwargs
# CSV reader needs a list of files
# (Assume flat directory structure if this is a dir)
if len(self.paths) == 1 and self.fs.isdir(self.paths[0]):
self.paths = self.fs.glob(self.fs.sep.join([self.paths[0], "*"]))
|
https://github.com/NVIDIA/NVTabular/issues/85
|
AttributeErrorTraceback (most recent call last)
<ipython-input-1-84910288ec3f> in <module>
44 del gdf
45 path_out = '/raid/criteo/tests/jp_csv_orig/'
---> 46 file_to_pq(train_set, 'csv', output_folder=path_out, cols=cols, dtypes=dtypes)
<ipython-input-1-84910288ec3f> in file_to_pq(target_files, file_type, output_folder, cols, dtypes)
34 old_file_path = None
35 writer = None
---> 36 for gdf in tar:
37 # gdf.to_parquet(output_folder)
38 file_path = os.path.join(output_folder, os.path.split(tar.itr.file_path)[1].split('.')[0])
/nvtabular/nvtabular/io.py in __iter__(self)
329 def __iter__(self):
330 for path in self.paths:
--> 331 yield from GPUFileIterator(path, **self.kwargs)
332
333
/nvtabular/nvtabular/io.py in __iter__(self)
271 for chunk in self.engine:
272 if self.dtypes:
--> 273 self._set_dtypes(chunk)
274 yield chunk
275 chunk = None
AttributeError: 'GPUFileIterator' object has no attribute '_set_dtypes'
|
AttributeError
|
def to_ddf(self, columns=None):
return dask_cudf.read_csv(self.paths, chunksize=self.part_size, **self.csv_kwargs)[
columns
]
|
def to_ddf(self, columns=None):
return dask_cudf.read_csv(
self.paths, names=self.names, chunksize=self.part_size, **self.csv_kwargs
)[columns]
|
https://github.com/NVIDIA/NVTabular/issues/85
|
AttributeErrorTraceback (most recent call last)
<ipython-input-1-84910288ec3f> in <module>
44 del gdf
45 path_out = '/raid/criteo/tests/jp_csv_orig/'
---> 46 file_to_pq(train_set, 'csv', output_folder=path_out, cols=cols, dtypes=dtypes)
<ipython-input-1-84910288ec3f> in file_to_pq(target_files, file_type, output_folder, cols, dtypes)
34 old_file_path = None
35 writer = None
---> 36 for gdf in tar:
37 # gdf.to_parquet(output_folder)
38 file_path = os.path.join(output_folder, os.path.split(tar.itr.file_path)[1].split('.')[0])
/nvtabular/nvtabular/io.py in __iter__(self)
329 def __iter__(self):
330 for path in self.paths:
--> 331 yield from GPUFileIterator(path, **self.kwargs)
332
333
/nvtabular/nvtabular/io.py in __iter__(self)
271 for chunk in self.engine:
272 if self.dtypes:
--> 273 self._set_dtypes(chunk)
274 yield chunk
275 chunk = None
AttributeError: 'GPUFileIterator' object has no attribute '_set_dtypes'
|
AttributeError
|
def _predict(self, X):
"""Collect results from clf.predict calls."""
if self.refit:
return np.asarray([clf.predict(X) for clf in self.clfs_]).T
else:
return np.asarray([self.le_.transform(clf.predict(X)) for clf in self.clfs_]).T
|
def _predict(self, X):
"""Collect results from clf.predict calls."""
return np.asarray([clf.predict(X) for clf in self.clfs_]).T
|
https://github.com/rasbt/mlxtend/issues/321
|
Traceback (most recent call last):
File "/_mlxtend_bug/reproduce.py", line 16, in <module>
print(clf.predict(test))
File "/venv/py3/lib/python3.4/site-packages/mlxtend/classifier/ensemble_vote.py", line 197, in predict
arr=predictions)
File "/venv/py3/lib/python3.4/site-packages/numpy/lib/shape_base.py", line 132, in apply_along_axis
res = asanyarray(func1d(inarr_view[ind0], *args, **kwargs))
File "/venv/py3/lib/python3.4/site-packages/mlxtend/classifier/ensemble_vote.py", line 195, in <lambda>
weights=self.weights)),
TypeError: Cannot cast array data from dtype('<U1') to dtype('int64') according to the rule 'safe'
|
TypeError
|
def transform(
self,
xx: Any,
yy: Any,
zz: Any = None,
tt: Any = None,
radians: bool = False,
errcheck: bool = False,
direction: Union[TransformDirection, str] = TransformDirection.FORWARD,
) -> Any:
"""
Transform points between two coordinate systems.
.. versionadded:: 2.1.1 errcheck
.. versionadded:: 2.2.0 direction
Parameters
----------
xx: scalar or array (numpy or python)
Input x coordinate(s).
yy: scalar or array (numpy or python)
Input y coordinate(s).
zz: scalar or array (numpy or python), optional
Input z coordinate(s).
tt: scalar or array (numpy or python), optional
Input time coordinate(s).
radians: boolean, optional
If True, will expect input data to be in radians and will return radians
if the projection is geographic. Default is False (degrees). Ignored for
pipeline transformations.
errcheck: boolean, optional (default False)
If True an exception is raised if the transformation is invalid.
By default errcheck=False and an invalid transformation
returns ``inf`` and no exception is raised.
direction: pyproj.enums.TransformDirection, optional
The direction of the transform.
Default is :attr:`pyproj.enums.TransformDirection.FORWARD`.
Example:
>>> from pyproj import Transformer
>>> transformer = Transformer.from_crs("epsg:4326", "epsg:3857")
>>> x3, y3 = transformer.transform(33, 98)
>>> "%.3f %.3f" % (x3, y3)
'10909310.098 3895303.963'
>>> pipeline_str = (
... "+proj=pipeline +step +proj=longlat +ellps=WGS84 "
... "+step +proj=unitconvert +xy_in=rad +xy_out=deg"
... )
>>> pipe_trans = Transformer.from_pipeline(pipeline_str)
>>> xt, yt = pipe_trans.transform(2.1, 0.001)
>>> "%.3f %.3f" % (xt, yt)
'2.100 0.001'
>>> transproj = Transformer.from_crs(
... {"proj":'geocent', "ellps":'WGS84', "datum":'WGS84'},
... "EPSG:4326",
... always_xy=True,
... )
>>> xpj, ypj, zpj = transproj.transform(
... -2704026.010,
... -4253051.810,
... 3895878.820,
... radians=True,
... )
>>> "%.3f %.3f %.3f" % (xpj, ypj, zpj)
'-2.137 0.661 -20.531'
>>> transprojr = Transformer.from_crs(
... "EPSG:4326",
... {"proj":'geocent', "ellps":'WGS84', "datum":'WGS84'},
... always_xy=True,
... )
>>> xpjr, ypjr, zpjr = transprojr.transform(xpj, ypj, zpj, radians=True)
>>> "%.3f %.3f %.3f" % (xpjr, ypjr, zpjr)
'-2704026.010 -4253051.810 3895878.820'
>>> transformer = Transformer.from_proj("epsg:4326", 4326, skip_equivalent=True)
>>> xeq, yeq = transformer.transform(33, 98)
>>> "%.0f %.0f" % (xeq, yeq)
'33 98'
"""
# process inputs, making copies that support buffer API.
inx, xisfloat, xislist, xistuple = _copytobuffer(xx)
iny, yisfloat, yislist, yistuple = _copytobuffer(yy)
if zz is not None:
inz, zisfloat, zislist, zistuple = _copytobuffer(zz)
else:
inz = None
if tt is not None:
intime, tisfloat, tislist, tistuple = _copytobuffer(tt)
else:
intime = None
# call pj_transform. inx,iny,inz buffers modified in place.
self._transformer._transform(
inx,
iny,
inz=inz,
intime=intime,
direction=direction,
radians=radians,
errcheck=errcheck,
)
# if inputs were lists, tuples or floats, convert back.
outx = _convertback(xisfloat, xislist, xistuple, inx)
outy = _convertback(yisfloat, yislist, xistuple, iny)
return_data = (outx, outy)
if inz is not None:
return_data += ( # type: ignore
_convertback(zisfloat, zislist, zistuple, inz),
)
if intime is not None:
return_data += ( # type: ignore
_convertback(tisfloat, tislist, tistuple, intime),
)
return return_data
|
def transform(
self,
xx: Any,
yy: Any,
zz: Any = None,
tt: Any = None,
radians: bool = False,
errcheck: bool = False,
direction: Union[TransformDirection, str] = TransformDirection.FORWARD,
) -> Any:
"""
Transform points between two coordinate systems.
.. versionadded:: 2.1.1 errcheck
.. versionadded:: 2.2.0 direction
Parameters
----------
xx: scalar or array (numpy or python)
Input x coordinate(s).
yy: scalar or array (numpy or python)
Input y coordinate(s).
zz: scalar or array (numpy or python), optional
Input z coordinate(s).
tt: scalar or array (numpy or python), optional
Input time coordinate(s).
radians: boolean, optional
If True, will expect input data to be in radians and will return radians
if the projection is geographic. Default is False (degrees). Ignored for
pipeline transformations.
errcheck: boolean, optional (default False)
If True an exception is raised if the transformation is invalid.
By default errcheck=False and an invalid transformation
returns ``inf`` and no exception is raised.
direction: pyproj.enums.TransformDirection, optional
The direction of the transform.
Default is :attr:`pyproj.enums.TransformDirection.FORWARD`.
Example:
>>> from pyproj import Transformer
>>> transformer = Transformer.from_crs("epsg:4326", "epsg:3857")
>>> x3, y3 = transformer.transform(33, 98)
>>> "%.3f %.3f" % (x3, y3)
'10909310.098 3895303.963'
>>> pipeline_str = (
... "+proj=pipeline +step +proj=longlat +ellps=WGS84 "
... "+step +proj=unitconvert +xy_in=rad +xy_out=deg"
... )
>>> pipe_trans = Transformer.from_pipeline(pipeline_str)
>>> xt, yt = pipe_trans.transform(2.1, 0.001)
>>> "%.3f %.3f" % (xt, yt)
'120.321 0.057'
>>> transproj = Transformer.from_crs(
... {"proj":'geocent', "ellps":'WGS84', "datum":'WGS84'},
... "EPSG:4326",
... always_xy=True,
... )
>>> xpj, ypj, zpj = transproj.transform(
... -2704026.010,
... -4253051.810,
... 3895878.820,
... radians=True,
... )
>>> "%.3f %.3f %.3f" % (xpj, ypj, zpj)
'-2.137 0.661 -20.531'
>>> transprojr = Transformer.from_crs(
... "EPSG:4326",
... {"proj":'geocent', "ellps":'WGS84', "datum":'WGS84'},
... always_xy=True,
... )
>>> xpjr, ypjr, zpjr = transprojr.transform(xpj, ypj, zpj, radians=True)
>>> "%.3f %.3f %.3f" % (xpjr, ypjr, zpjr)
'-2704026.010 -4253051.810 3895878.820'
>>> transformer = Transformer.from_proj("epsg:4326", 4326, skip_equivalent=True)
>>> xeq, yeq = transformer.transform(33, 98)
>>> "%.0f %.0f" % (xeq, yeq)
'33 98'
"""
# process inputs, making copies that support buffer API.
inx, xisfloat, xislist, xistuple = _copytobuffer(xx)
iny, yisfloat, yislist, yistuple = _copytobuffer(yy)
if zz is not None:
inz, zisfloat, zislist, zistuple = _copytobuffer(zz)
else:
inz = None
if tt is not None:
intime, tisfloat, tislist, tistuple = _copytobuffer(tt)
else:
intime = None
# call pj_transform. inx,iny,inz buffers modified in place.
self._transformer._transform(
inx,
iny,
inz=inz,
intime=intime,
direction=direction,
radians=radians,
errcheck=errcheck,
)
# if inputs were lists, tuples or floats, convert back.
outx = _convertback(xisfloat, xislist, xistuple, inx)
outy = _convertback(yisfloat, yislist, xistuple, iny)
return_data = (outx, outy)
if inz is not None:
return_data += ( # type: ignore
_convertback(zisfloat, zislist, zistuple, inz),
)
if intime is not None:
return_data += ( # type: ignore
_convertback(tisfloat, tislist, tistuple, intime),
)
return return_data
|
https://github.com/pyproj4/pyproj/issues/565
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.7/site-packages/pyproj/transformer.py", line 446, in transform
errcheck=errcheck,
File "pyproj/_transformer.pyx", line 463, in pyproj._transformer._Transformer._transform
pyproj.exceptions.ProjError: transform error: latitude or longitude exceeded limits
|
pyproj.exceptions.ProjError
|
def itransform(
self,
points: Any,
switch: bool = False,
time_3rd: bool = False,
radians: bool = False,
errcheck: bool = False,
direction: Union[TransformDirection, str] = TransformDirection.FORWARD,
) -> Iterator[Iterable]:
"""
Iterator/generator version of the function pyproj.Transformer.transform.
.. versionadded:: 2.1.1 errcheck
.. versionadded:: 2.2.0 direction
Parameters
----------
points: list
List of point tuples.
switch: boolean, optional
If True x, y or lon,lat coordinates of points are switched to y, x
or lat, lon. Default is False.
time_3rd: boolean, optional
If the input coordinates are 3 dimensional and the 3rd dimension is time.
radians: boolean, optional
If True, will expect input data to be in radians and will return radians
if the projection is geographic. Default is False (degrees). Ignored for
pipeline transformations.
errcheck: boolean, optional (default False)
If True an exception is raised if the transformation is invalid.
By default errcheck=False and an invalid transformation
returns ``inf`` and no exception is raised.
direction: pyproj.enums.TransformDirection, optional
The direction of the transform.
Default is :attr:`pyproj.enums.TransformDirection.FORWARD`.
Example:
>>> from pyproj import Transformer
>>> transformer = Transformer.from_crs(4326, 2100)
>>> points = [(22.95, 40.63), (22.81, 40.53), (23.51, 40.86)]
>>> for pt in transformer.itransform(points): '{:.3f} {:.3f}'.format(*pt)
'2221638.801 2637034.372'
'2212924.125 2619851.898'
'2238294.779 2703763.736'
>>> pipeline_str = (
... "+proj=pipeline +step +proj=longlat +ellps=WGS84 "
... "+step +proj=unitconvert +xy_in=rad +xy_out=deg"
... )
>>> pipe_trans = Transformer.from_pipeline(pipeline_str)
>>> for pt in pipe_trans.itransform([(2.1, 0.001)]):
... '{:.3f} {:.3f}'.format(*pt)
'2.100 0.001'
>>> transproj = Transformer.from_crs(
... {"proj":'geocent', "ellps":'WGS84', "datum":'WGS84'},
... "EPSG:4326",
... always_xy=True,
... )
>>> for pt in transproj.itransform(
... [(-2704026.010, -4253051.810, 3895878.820)],
... radians=True,
... ):
... '{:.3f} {:.3f} {:.3f}'.format(*pt)
'-2.137 0.661 -20.531'
>>> transprojr = Transformer.from_crs(
... "EPSG:4326",
... {"proj":'geocent', "ellps":'WGS84', "datum":'WGS84'},
... always_xy=True,
... )
>>> for pt in transprojr.itransform(
... [(-2.137, 0.661, -20.531)],
... radians=True
... ):
... '{:.3f} {:.3f} {:.3f}'.format(*pt)
'-2704214.394 -4254414.478 3894270.731'
>>> transproj_eq = Transformer.from_proj(
... 'EPSG:4326',
... '+proj=longlat +datum=WGS84 +no_defs +type=crs',
... always_xy=True,
... skip_equivalent=True
... )
>>> for pt in transproj_eq.itransform([(-2.137, 0.661)]):
... '{:.3f} {:.3f}'.format(*pt)
'-2.137 0.661'
"""
it = iter(points) # point iterator
# get first point to check stride
try:
fst_pt = next(it)
except StopIteration:
raise ValueError("iterable must contain at least one point")
stride = len(fst_pt)
if stride not in (2, 3, 4):
raise ValueError("points can contain up to 4 coordinates")
if time_3rd and stride != 3:
raise ValueError("'time_3rd' is only valid for 3 coordinates.")
# create a coordinate sequence generator etc. x1,y1,z1,x2,y2,z2,....
# chain so the generator returns the first point that was already acquired
coord_gen = chain(fst_pt, (coords[c] for coords in it for c in range(stride)))
while True:
# create a temporary buffer storage for
# the next 64 points (64*stride*8 bytes)
buff = array("d", islice(coord_gen, 0, 64 * stride))
if len(buff) == 0:
break
self._transformer._transform_sequence(
stride,
buff,
switch=switch,
direction=direction,
time_3rd=time_3rd,
radians=radians,
errcheck=errcheck,
)
for pt in zip(*([iter(buff)] * stride)):
yield pt
|
def itransform(
self,
points: Any,
switch: bool = False,
time_3rd: bool = False,
radians: bool = False,
errcheck: bool = False,
direction: Union[TransformDirection, str] = TransformDirection.FORWARD,
) -> Iterator[Iterable]:
"""
Iterator/generator version of the function pyproj.Transformer.transform.
.. versionadded:: 2.1.1 errcheck
.. versionadded:: 2.2.0 direction
Parameters
----------
points: list
List of point tuples.
switch: boolean, optional
If True x, y or lon,lat coordinates of points are switched to y, x
or lat, lon. Default is False.
time_3rd: boolean, optional
If the input coordinates are 3 dimensional and the 3rd dimension is time.
radians: boolean, optional
If True, will expect input data to be in radians and will return radians
if the projection is geographic. Default is False (degrees). Ignored for
pipeline transformations.
errcheck: boolean, optional (default False)
If True an exception is raised if the transformation is invalid.
By default errcheck=False and an invalid transformation
returns ``inf`` and no exception is raised.
direction: pyproj.enums.TransformDirection, optional
The direction of the transform.
Default is :attr:`pyproj.enums.TransformDirection.FORWARD`.
Example:
>>> from pyproj import Transformer
>>> transformer = Transformer.from_crs(4326, 2100)
>>> points = [(22.95, 40.63), (22.81, 40.53), (23.51, 40.86)]
>>> for pt in transformer.itransform(points): '{:.3f} {:.3f}'.format(*pt)
'2221638.801 2637034.372'
'2212924.125 2619851.898'
'2238294.779 2703763.736'
>>> pipeline_str = (
... "+proj=pipeline +step +proj=longlat +ellps=WGS84 "
... "+step +proj=unitconvert +xy_in=rad +xy_out=deg"
... )
>>> pipe_trans = Transformer.from_pipeline(pipeline_str)
>>> for pt in pipe_trans.itransform([(2.1, 0.001)]):
... '{:.3f} {:.3f}'.format(*pt)
'120.321 0.057'
>>> transproj = Transformer.from_crs(
... {"proj":'geocent', "ellps":'WGS84', "datum":'WGS84'},
... "EPSG:4326",
... always_xy=True,
... )
>>> for pt in transproj.itransform(
... [(-2704026.010, -4253051.810, 3895878.820)],
... radians=True,
... ):
... '{:.3f} {:.3f} {:.3f}'.format(*pt)
'-2.137 0.661 -20.531'
>>> transprojr = Transformer.from_crs(
... "EPSG:4326",
... {"proj":'geocent', "ellps":'WGS84', "datum":'WGS84'},
... always_xy=True,
... )
>>> for pt in transprojr.itransform(
... [(-2.137, 0.661, -20.531)],
... radians=True
... ):
... '{:.3f} {:.3f} {:.3f}'.format(*pt)
'-2704214.394 -4254414.478 3894270.731'
>>> transproj_eq = Transformer.from_proj(
... 'EPSG:4326',
... '+proj=longlat +datum=WGS84 +no_defs +type=crs',
... always_xy=True,
... skip_equivalent=True
... )
>>> for pt in transproj_eq.itransform([(-2.137, 0.661)]):
... '{:.3f} {:.3f}'.format(*pt)
'-2.137 0.661'
"""
it = iter(points) # point iterator
# get first point to check stride
try:
fst_pt = next(it)
except StopIteration:
raise ValueError("iterable must contain at least one point")
stride = len(fst_pt)
if stride not in (2, 3, 4):
raise ValueError("points can contain up to 4 coordinates")
if time_3rd and stride != 3:
raise ValueError("'time_3rd' is only valid for 3 coordinates.")
# create a coordinate sequence generator etc. x1,y1,z1,x2,y2,z2,....
# chain so the generator returns the first point that was already acquired
coord_gen = chain(fst_pt, (coords[c] for coords in it for c in range(stride)))
while True:
# create a temporary buffer storage for
# the next 64 points (64*stride*8 bytes)
buff = array("d", islice(coord_gen, 0, 64 * stride))
if len(buff) == 0:
break
self._transformer._transform_sequence(
stride,
buff,
switch=switch,
direction=direction,
time_3rd=time_3rd,
radians=radians,
errcheck=errcheck,
)
for pt in zip(*([iter(buff)] * stride)):
yield pt
|
https://github.com/pyproj4/pyproj/issues/565
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.7/site-packages/pyproj/transformer.py", line 446, in transform
errcheck=errcheck,
File "pyproj/_transformer.pyx", line 463, in pyproj._transformer._Transformer._transform
pyproj.exceptions.ProjError: transform error: latitude or longitude exceeded limits
|
pyproj.exceptions.ProjError
|
def from_user_input(value: Any) -> "CRS":
"""
Initialize a CRS class instance with:
- PROJ string
- Dictionary of PROJ parameters
- PROJ keyword arguments for parameters
- JSON string with PROJ parameters
- CRS WKT string
- An authority string [i.e. 'epsg:4326']
- An EPSG integer code [i.e. 4326]
- A tuple of ("auth_name": "auth_code") [i.e ('epsg', '4326')]
- An object with a `to_wkt` method.
- A :class:`pyproj.crs.CRS` class
Parameters
----------
value : obj
A Python int, dict, or str.
Returns
-------
CRS
"""
if isinstance(value, CRS):
return value
return CRS(value)
|
def from_user_input(value: str) -> "CRS":
"""
Initialize a CRS class instance with:
- PROJ string
- Dictionary of PROJ parameters
- PROJ keyword arguments for parameters
- JSON string with PROJ parameters
- CRS WKT string
- An authority string [i.e. 'epsg:4326']
- An EPSG integer code [i.e. 4326]
- A tuple of ("auth_name": "auth_code") [i.e ('epsg', '4326')]
- An object with a `to_wkt` method.
- A :class:`pyproj.crs.CRS` class
Parameters
----------
value : obj
A Python int, dict, or str.
Returns
-------
CRS
"""
if isinstance(value, CRS):
return value
return CRS(value)
|
https://github.com/pyproj4/pyproj/issues/554
|
import pyproj
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum.from_name()
KeyError: 'URN:OGC:DEF:DATUM:EPSG::6326'
During handling of the above exception, another exception occurred:
CRSError Traceback (most recent call last)
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum.from_string()
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum.from_name()
CRSError: Invalid datum name: urn:ogc:def:datum:EPSG::6326
During handling of the above exception, another exception occurred:
CRSError Traceback (most recent call last)
<ipython-input-1-98cb605ea9de> in <module>
----> 1 import pyproj
~/scipy/repos/pyproj/pyproj/__init__.py in <module>
79 )
80 from pyproj._show_versions import show_versions # noqa: F401
---> 81 from pyproj.crs import CRS # noqa: F401
82 from pyproj.exceptions import DataDirError, ProjError # noqa: F401
83 from pyproj.geod import Geod, geodesic_version_str, pj_ellps # noqa: F401
~/scipy/repos/pyproj/pyproj/crs/__init__.py in <module>
17 is_wkt,
18 )
---> 19 from pyproj.crs.crs import ( # noqa: F401
20 CRS,
21 BoundCRS,
~/scipy/repos/pyproj/pyproj/crs/crs.py in <module>
1026
1027
-> 1028 class ProjectedCRS(CRS):
1029 """
1030 .. versionadded:: 2.5.0
~/scipy/repos/pyproj/pyproj/crs/crs.py in ProjectedCRS()
1038 name="undefined",
1039 cartesian_cs=Cartesian2DCS(),
-> 1040 geodetic_crs=GeographicCRS(),
1041 ):
1042 """
~/scipy/repos/pyproj/pyproj/crs/crs.py in __init__(self, name, datum, ellipsoidal_cs)
977 "type": "GeographicCRS",
978 "name": name,
--> 979 "datum": Datum.from_user_input(datum).to_json_dict(),
980 "coordinate_system": CoordinateSystem.from_user_input(
981 ellipsoidal_cs
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs._CRSParts.from_user_input()
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum.from_string()
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum.from_string()
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum._from_string()
CRSError: Invalid datum string: urn:ogc:def:datum:EPSG::6326: (Internal Proj Error: proj_create: SQLite error on SELECT name, ellipsoid_auth_name, ellipsoid_code, prime_meridian_auth_name, prime_meridian_code, area_of_use_auth_name, area_of_use_code, publication_date, deprecated FROM geodetic_datum WHERE auth_name = ? AND code = ?: no such column: publication_date)
|
KeyError
|
def __init__(
self,
name: str = "undefined",
datum: Any = "urn:ogc:def:datum:EPSG::6326",
ellipsoidal_cs: Any = None,
) -> None:
"""
Parameters
----------
name: str, optional
Name of the CRS. Default is undefined.
datum: Any, optional
Anything accepted by :meth:`pyproj.crs.Datum.from_user_input` or
a :class:`pyproj.crs.datum.CustomDatum`.
ellipsoidal_cs: Any, optional
Input to create an Ellipsoidal Coordinate System.
Anything accepted by :meth:`pyproj.crs.CoordinateSystem.from_user_input`
or an Ellipsoidal Coordinate System created from :ref:`coordinate_system`.
"""
geographic_crs_json = {
"$schema": "https://proj.org/schemas/v0.2/projjson.schema.json",
"type": "GeographicCRS",
"name": name,
"datum": Datum.from_user_input(datum).to_json_dict(),
"coordinate_system": CoordinateSystem.from_user_input(
ellipsoidal_cs or Ellipsoidal2DCS()
).to_json_dict(),
}
super().__init__(geographic_crs_json)
|
def __init__(
self,
name: str = "undefined",
datum: Any = "urn:ogc:def:datum:EPSG::6326",
ellipsoidal_cs: Any = Ellipsoidal2DCS(),
) -> None:
"""
Parameters
----------
name: str, optional
Name of the CRS. Default is undefined.
datum: Any, optional
Anything accepted by :meth:`pyproj.crs.Datum.from_user_input` or
a :class:`pyproj.crs.datum.CustomDatum`.
ellipsoidal_cs: Any, optional
Input to create an Ellipsoidal Coordinate System.
Anything accepted by :meth:`pyproj.crs.CoordinateSystem.from_user_input`
or an Ellipsoidal Coordinate System created from :ref:`coordinate_system`.
"""
geographic_crs_json = {
"$schema": "https://proj.org/schemas/v0.2/projjson.schema.json",
"type": "GeographicCRS",
"name": name,
"datum": Datum.from_user_input(datum).to_json_dict(),
"coordinate_system": CoordinateSystem.from_user_input(
ellipsoidal_cs
).to_json_dict(),
}
super().__init__(geographic_crs_json)
|
https://github.com/pyproj4/pyproj/issues/554
|
import pyproj
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum.from_name()
KeyError: 'URN:OGC:DEF:DATUM:EPSG::6326'
During handling of the above exception, another exception occurred:
CRSError Traceback (most recent call last)
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum.from_string()
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum.from_name()
CRSError: Invalid datum name: urn:ogc:def:datum:EPSG::6326
During handling of the above exception, another exception occurred:
CRSError Traceback (most recent call last)
<ipython-input-1-98cb605ea9de> in <module>
----> 1 import pyproj
~/scipy/repos/pyproj/pyproj/__init__.py in <module>
79 )
80 from pyproj._show_versions import show_versions # noqa: F401
---> 81 from pyproj.crs import CRS # noqa: F401
82 from pyproj.exceptions import DataDirError, ProjError # noqa: F401
83 from pyproj.geod import Geod, geodesic_version_str, pj_ellps # noqa: F401
~/scipy/repos/pyproj/pyproj/crs/__init__.py in <module>
17 is_wkt,
18 )
---> 19 from pyproj.crs.crs import ( # noqa: F401
20 CRS,
21 BoundCRS,
~/scipy/repos/pyproj/pyproj/crs/crs.py in <module>
1026
1027
-> 1028 class ProjectedCRS(CRS):
1029 """
1030 .. versionadded:: 2.5.0
~/scipy/repos/pyproj/pyproj/crs/crs.py in ProjectedCRS()
1038 name="undefined",
1039 cartesian_cs=Cartesian2DCS(),
-> 1040 geodetic_crs=GeographicCRS(),
1041 ):
1042 """
~/scipy/repos/pyproj/pyproj/crs/crs.py in __init__(self, name, datum, ellipsoidal_cs)
977 "type": "GeographicCRS",
978 "name": name,
--> 979 "datum": Datum.from_user_input(datum).to_json_dict(),
980 "coordinate_system": CoordinateSystem.from_user_input(
981 ellipsoidal_cs
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs._CRSParts.from_user_input()
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum.from_string()
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum.from_string()
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum._from_string()
CRSError: Invalid datum string: urn:ogc:def:datum:EPSG::6326: (Internal Proj Error: proj_create: SQLite error on SELECT name, ellipsoid_auth_name, ellipsoid_code, prime_meridian_auth_name, prime_meridian_code, area_of_use_auth_name, area_of_use_code, publication_date, deprecated FROM geodetic_datum WHERE auth_name = ? AND code = ?: no such column: publication_date)
|
KeyError
|
def __init__(
self,
base_crs: Any,
conversion: Any,
ellipsoidal_cs: Any = None,
name: str = "undefined",
) -> None:
"""
Parameters
----------
base_crs: Any
Input to create the Geodetic CRS, a :class:`GeographicCRS` or
anything accepted by :meth:`pyproj.crs.CRS.from_user_input`.
conversion: Any
Anything accepted by :meth:`pyproj.crs.CoordinateSystem.from_user_input`
or a conversion from :ref:`coordinate_operation`.
ellipsoidal_cs: Any, optional
Input to create an Ellipsoidal Coordinate System.
Anything accepted by :meth:`pyproj.crs.CoordinateSystem.from_user_input`
or an Ellipsoidal Coordinate System created from :ref:`coordinate_system`.
name: str, optional
Name of the CRS. Default is undefined.
"""
derived_geographic_crs_json = {
"$schema": "https://proj.org/schemas/v0.2/projjson.schema.json",
"type": "DerivedGeographicCRS",
"name": name,
"base_crs": CRS.from_user_input(base_crs).to_json_dict(),
"conversion": CoordinateOperation.from_user_input(conversion).to_json_dict(),
"coordinate_system": CoordinateSystem.from_user_input(
ellipsoidal_cs or Ellipsoidal2DCS()
).to_json_dict(),
}
super().__init__(derived_geographic_crs_json)
|
def __init__(
self,
base_crs: Any,
conversion: Any,
ellipsoidal_cs: Any = Ellipsoidal2DCS(),
name: str = "undefined",
) -> None:
"""
Parameters
----------
base_crs: Any
Input to create the Geodetic CRS, a :class:`GeographicCRS` or
anything accepted by :meth:`pyproj.crs.CRS.from_user_input`.
conversion: Any
Anything accepted by :meth:`pyproj.crs.CoordinateSystem.from_user_input`
or a conversion from :ref:`coordinate_operation`.
ellipsoidal_cs: Any, optional
Input to create an Ellipsoidal Coordinate System.
Anything accepted by :meth:`pyproj.crs.CoordinateSystem.from_user_input`
or an Ellipsoidal Coordinate System created from :ref:`coordinate_system`.
name: str, optional
Name of the CRS. Default is undefined.
"""
derived_geographic_crs_json = {
"$schema": "https://proj.org/schemas/v0.2/projjson.schema.json",
"type": "DerivedGeographicCRS",
"name": name,
"base_crs": CRS.from_user_input(base_crs).to_json_dict(),
"conversion": CoordinateOperation.from_user_input(conversion).to_json_dict(),
"coordinate_system": CoordinateSystem.from_user_input(
ellipsoidal_cs
).to_json_dict(),
}
super().__init__(derived_geographic_crs_json)
|
https://github.com/pyproj4/pyproj/issues/554
|
import pyproj
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum.from_name()
KeyError: 'URN:OGC:DEF:DATUM:EPSG::6326'
During handling of the above exception, another exception occurred:
CRSError Traceback (most recent call last)
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum.from_string()
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum.from_name()
CRSError: Invalid datum name: urn:ogc:def:datum:EPSG::6326
During handling of the above exception, another exception occurred:
CRSError Traceback (most recent call last)
<ipython-input-1-98cb605ea9de> in <module>
----> 1 import pyproj
~/scipy/repos/pyproj/pyproj/__init__.py in <module>
79 )
80 from pyproj._show_versions import show_versions # noqa: F401
---> 81 from pyproj.crs import CRS # noqa: F401
82 from pyproj.exceptions import DataDirError, ProjError # noqa: F401
83 from pyproj.geod import Geod, geodesic_version_str, pj_ellps # noqa: F401
~/scipy/repos/pyproj/pyproj/crs/__init__.py in <module>
17 is_wkt,
18 )
---> 19 from pyproj.crs.crs import ( # noqa: F401
20 CRS,
21 BoundCRS,
~/scipy/repos/pyproj/pyproj/crs/crs.py in <module>
1026
1027
-> 1028 class ProjectedCRS(CRS):
1029 """
1030 .. versionadded:: 2.5.0
~/scipy/repos/pyproj/pyproj/crs/crs.py in ProjectedCRS()
1038 name="undefined",
1039 cartesian_cs=Cartesian2DCS(),
-> 1040 geodetic_crs=GeographicCRS(),
1041 ):
1042 """
~/scipy/repos/pyproj/pyproj/crs/crs.py in __init__(self, name, datum, ellipsoidal_cs)
977 "type": "GeographicCRS",
978 "name": name,
--> 979 "datum": Datum.from_user_input(datum).to_json_dict(),
980 "coordinate_system": CoordinateSystem.from_user_input(
981 ellipsoidal_cs
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs._CRSParts.from_user_input()
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum.from_string()
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum.from_string()
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum._from_string()
CRSError: Invalid datum string: urn:ogc:def:datum:EPSG::6326: (Internal Proj Error: proj_create: SQLite error on SELECT name, ellipsoid_auth_name, ellipsoid_code, prime_meridian_auth_name, prime_meridian_code, area_of_use_auth_name, area_of_use_code, publication_date, deprecated FROM geodetic_datum WHERE auth_name = ? AND code = ?: no such column: publication_date)
|
KeyError
|
def __init__(
self,
conversion: Any,
name: str = "undefined",
cartesian_cs: Any = None,
geodetic_crs: Any = None,
) -> None:
"""
Parameters
----------
conversion: Any
Anything accepted by :meth:`pyproj.crs.CoordinateSystem.from_user_input`
or a conversion from :ref:`coordinate_operation`.
name: str, optional
The name of the Projected CRS. Default is undefined.
cartesian_cs: Any, optional
Input to create a Cartesian Coordinate System.
Anything accepted by :meth:`pyproj.crs.CoordinateSystem.from_user_input`
or :class:`pyproj.crs.coordinate_system.Cartesian2DCS`.
geodetic_crs: Any, optional
Input to create the Geodetic CRS, a :class:`GeographicCRS` or
anything accepted by :meth:`pyproj.crs.CRS.from_user_input`.
"""
proj_crs_json = {
"$schema": "https://proj.org/schemas/v0.2/projjson.schema.json",
"type": "ProjectedCRS",
"name": name,
"base_crs": CRS.from_user_input(geodetic_crs or GeographicCRS()).to_json_dict(),
"conversion": CoordinateOperation.from_user_input(conversion).to_json_dict(),
"coordinate_system": CoordinateSystem.from_user_input(
cartesian_cs or Cartesian2DCS()
).to_json_dict(),
}
super().__init__(proj_crs_json)
|
def __init__(
self,
conversion: Any,
name: str = "undefined",
cartesian_cs: Any = Cartesian2DCS(),
geodetic_crs: Any = GeographicCRS(),
) -> None:
"""
Parameters
----------
conversion: Any
Anything accepted by :meth:`pyproj.crs.CoordinateSystem.from_user_input`
or a conversion from :ref:`coordinate_operation`.
name: str, optional
The name of the Projected CRS. Default is undefined.
cartesian_cs: Any, optional
Input to create a Cartesian Coordinate System.
Anything accepted by :meth:`pyproj.crs.CoordinateSystem.from_user_input`
or :class:`pyproj.crs.coordinate_system.Cartesian2DCS`.
geodetic_crs: Any, optional
Input to create the Geodetic CRS, a :class:`GeographicCRS` or
anything accepted by :meth:`pyproj.crs.CRS.from_user_input`.
"""
proj_crs_json = {
"$schema": "https://proj.org/schemas/v0.2/projjson.schema.json",
"type": "ProjectedCRS",
"name": name,
"base_crs": CRS.from_user_input(geodetic_crs).to_json_dict(),
"conversion": CoordinateOperation.from_user_input(conversion).to_json_dict(),
"coordinate_system": CoordinateSystem.from_user_input(
cartesian_cs
).to_json_dict(),
}
super().__init__(proj_crs_json)
|
https://github.com/pyproj4/pyproj/issues/554
|
import pyproj
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum.from_name()
KeyError: 'URN:OGC:DEF:DATUM:EPSG::6326'
During handling of the above exception, another exception occurred:
CRSError Traceback (most recent call last)
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum.from_string()
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum.from_name()
CRSError: Invalid datum name: urn:ogc:def:datum:EPSG::6326
During handling of the above exception, another exception occurred:
CRSError Traceback (most recent call last)
<ipython-input-1-98cb605ea9de> in <module>
----> 1 import pyproj
~/scipy/repos/pyproj/pyproj/__init__.py in <module>
79 )
80 from pyproj._show_versions import show_versions # noqa: F401
---> 81 from pyproj.crs import CRS # noqa: F401
82 from pyproj.exceptions import DataDirError, ProjError # noqa: F401
83 from pyproj.geod import Geod, geodesic_version_str, pj_ellps # noqa: F401
~/scipy/repos/pyproj/pyproj/crs/__init__.py in <module>
17 is_wkt,
18 )
---> 19 from pyproj.crs.crs import ( # noqa: F401
20 CRS,
21 BoundCRS,
~/scipy/repos/pyproj/pyproj/crs/crs.py in <module>
1026
1027
-> 1028 class ProjectedCRS(CRS):
1029 """
1030 .. versionadded:: 2.5.0
~/scipy/repos/pyproj/pyproj/crs/crs.py in ProjectedCRS()
1038 name="undefined",
1039 cartesian_cs=Cartesian2DCS(),
-> 1040 geodetic_crs=GeographicCRS(),
1041 ):
1042 """
~/scipy/repos/pyproj/pyproj/crs/crs.py in __init__(self, name, datum, ellipsoidal_cs)
977 "type": "GeographicCRS",
978 "name": name,
--> 979 "datum": Datum.from_user_input(datum).to_json_dict(),
980 "coordinate_system": CoordinateSystem.from_user_input(
981 ellipsoidal_cs
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs._CRSParts.from_user_input()
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum.from_string()
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum.from_string()
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum._from_string()
CRSError: Invalid datum string: urn:ogc:def:datum:EPSG::6326: (Internal Proj Error: proj_create: SQLite error on SELECT name, ellipsoid_auth_name, ellipsoid_code, prime_meridian_auth_name, prime_meridian_code, area_of_use_auth_name, area_of_use_code, publication_date, deprecated FROM geodetic_datum WHERE auth_name = ? AND code = ?: no such column: publication_date)
|
KeyError
|
def __init__(
self,
name: str,
datum: Any,
vertical_cs: Any = None,
geoid_model: Optional[str] = None,
) -> None:
"""
Parameters
----------
name: str
The name of the Vertical CRS (e.g. NAVD88 height).
datum: Any
Anything accepted by :meth:`pyproj.crs.Datum.from_user_input`
vertical_cs: Any, optional
Input to create a Vertical Coordinate System accepted by
:meth:`pyproj.crs.CoordinateSystem.from_user_input`
or :class:`pyproj.crs.coordinate_system.VerticalCS`
geoid_model: str, optional
The name of the GEOID Model (e.g. GEOID12B).
"""
vert_crs_json = {
"$schema": "https://proj.org/schemas/v0.2/projjson.schema.json",
"type": "VerticalCRS",
"name": name,
"datum": Datum.from_user_input(datum).to_json_dict(),
"coordinate_system": CoordinateSystem.from_user_input(
vertical_cs or VerticalCS()
).to_json_dict(),
}
if geoid_model is not None:
vert_crs_json["geoid_model"] = {"name": geoid_model}
super().__init__(vert_crs_json)
|
def __init__(
self,
name: str,
datum: Any,
vertical_cs: Any = VerticalCS(),
geoid_model: str = None,
) -> None:
"""
Parameters
----------
name: str
The name of the Vertical CRS (e.g. NAVD88 height).
datum: Any
Anything accepted by :meth:`pyproj.crs.Datum.from_user_input`
vertical_cs: Any, optional
Input to create a Vertical Coordinate System accepted by
:meth:`pyproj.crs.CoordinateSystem.from_user_input`
or :class:`pyproj.crs.coordinate_system.VerticalCS`
geoid_model: str, optional
The name of the GEOID Model (e.g. GEOID12B).
"""
vert_crs_json = {
"$schema": "https://proj.org/schemas/v0.2/projjson.schema.json",
"type": "VerticalCRS",
"name": name,
"datum": Datum.from_user_input(datum).to_json_dict(),
"coordinate_system": CoordinateSystem.from_user_input(
vertical_cs
).to_json_dict(),
}
if geoid_model is not None:
vert_crs_json["geoid_model"] = {"name": geoid_model}
super().__init__(vert_crs_json)
|
https://github.com/pyproj4/pyproj/issues/554
|
import pyproj
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum.from_name()
KeyError: 'URN:OGC:DEF:DATUM:EPSG::6326'
During handling of the above exception, another exception occurred:
CRSError Traceback (most recent call last)
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum.from_string()
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum.from_name()
CRSError: Invalid datum name: urn:ogc:def:datum:EPSG::6326
During handling of the above exception, another exception occurred:
CRSError Traceback (most recent call last)
<ipython-input-1-98cb605ea9de> in <module>
----> 1 import pyproj
~/scipy/repos/pyproj/pyproj/__init__.py in <module>
79 )
80 from pyproj._show_versions import show_versions # noqa: F401
---> 81 from pyproj.crs import CRS # noqa: F401
82 from pyproj.exceptions import DataDirError, ProjError # noqa: F401
83 from pyproj.geod import Geod, geodesic_version_str, pj_ellps # noqa: F401
~/scipy/repos/pyproj/pyproj/crs/__init__.py in <module>
17 is_wkt,
18 )
---> 19 from pyproj.crs.crs import ( # noqa: F401
20 CRS,
21 BoundCRS,
~/scipy/repos/pyproj/pyproj/crs/crs.py in <module>
1026
1027
-> 1028 class ProjectedCRS(CRS):
1029 """
1030 .. versionadded:: 2.5.0
~/scipy/repos/pyproj/pyproj/crs/crs.py in ProjectedCRS()
1038 name="undefined",
1039 cartesian_cs=Cartesian2DCS(),
-> 1040 geodetic_crs=GeographicCRS(),
1041 ):
1042 """
~/scipy/repos/pyproj/pyproj/crs/crs.py in __init__(self, name, datum, ellipsoidal_cs)
977 "type": "GeographicCRS",
978 "name": name,
--> 979 "datum": Datum.from_user_input(datum).to_json_dict(),
980 "coordinate_system": CoordinateSystem.from_user_input(
981 ellipsoidal_cs
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs._CRSParts.from_user_input()
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum.from_string()
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum.from_string()
~/scipy/repos/pyproj/pyproj/_crs.pyx in pyproj._crs.Datum._from_string()
CRSError: Invalid datum string: urn:ogc:def:datum:EPSG::6326: (Internal Proj Error: proj_create: SQLite error on SELECT name, ellipsoid_auth_name, ellipsoid_code, prime_meridian_auth_name, prime_meridian_code, area_of_use_auth_name, area_of_use_code, publication_date, deprecated FROM geodetic_datum WHERE auth_name = ? AND code = ?: no such column: publication_date)
|
KeyError
|
def set_data_dir(proj_data_dir):
"""
Set the data directory for PROJ to use.
Parameters
----------
proj_data_dir: str
The path to rhe PROJ data directory.
"""
global _USER_PROJ_DATA
_USER_PROJ_DATA = proj_data_dir
# reset search paths
from pyproj._datadir import PYPROJ_CONTEXT
PYPROJ_CONTEXT.set_search_paths(reset=True)
|
def set_data_dir(proj_data_dir):
"""
Set the data directory for PROJ to use.
Parameters
----------
proj_data_dir: str
The path to rhe PROJ data directory.
"""
global _USER_PROJ_DATA
_USER_PROJ_DATA = proj_data_dir
# reset search paths
from pyproj._datadir import PYPROJ_CONTEXT
PYPROJ_CONTEXT.set_search_paths()
|
https://github.com/pyproj4/pyproj/issues/415
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
...
File "/opt/conda/lib/python3.7/site-packages/geopandas/geodataframe.py", line 459, in to_crs
geom = df.geometry.to_crs(crs=crs, epsg=epsg)
File "/opt/conda/lib/python3.7/site-packages/geopandas/geoseries.py", line 304, in to_crs
proj_in = pyproj.Proj(self.crs, preserve_units=True)
File "/opt/conda/lib/python3.7/site-packages/pyproj/proj.py", line 147, in __init__
self.crs = CRS.from_user_input(projparams if projparams is not None else kwargs)
File "/opt/conda/lib/python3.7/site-packages/pyproj/crs.py", line 435, in from_user_input
return cls(value)
File "/opt/conda/lib/python3.7/site-packages/pyproj/crs.py", line 304, in __init__
super(CRS, self).__init__(projstring)
File "pyproj/_crs.pyx", line 1308, in pyproj._crs._CRS.__init__
File "pyproj/_datadir.pyx", line 18, in pyproj._datadir.get_pyproj_context
File "/opt/conda/lib/python3.7/site-packages/pyproj/datadir.py", line 99, in get_data_dir
"Valid PROJ data directory not found. "
pyproj.exceptions.DataDirError: Valid PROJ data directory not found. Either set the path using the environmental variable PROJ_LIB or with `pyproj.datadir.set_data_dir`.
|
pyproj.exceptions.DataDirError
|
def set_data_dir(proj_data_dir):
"""
Set the data directory for PROJ to use.
Parameters
----------
proj_data_dir: str
The path to rhe PROJ data directory.
"""
global _USER_PROJ_DATA
_USER_PROJ_DATA = proj_data_dir
# reset search paths
from pyproj._datadir import PYPROJ_CONTEXT
PYPROJ_CONTEXT.set_search_paths()
|
def set_data_dir(proj_data_dir):
"""
Set the data directory for PROJ to use.
Parameters
----------
proj_data_dir: str
The path to rhe PROJ data directory.
"""
global _USER_PROJ_DATA
global _VALIDATED_PROJ_DATA
_USER_PROJ_DATA = proj_data_dir
# set to none to re-validate
_VALIDATED_PROJ_DATA = None
|
https://github.com/pyproj4/pyproj/issues/374
|
97%|█████████████████████████████████▊ | 88243/91210 [00:26<00:00, 6190.94it/s]
CRSs instantiated: 507
CRSs instantiated (cache hits included): 88603
Transformers instantiated: 502
Transformers instantiated (cache hits included): 88389
---------------------------------------------------------------------------
ProjError Traceback (most recent call last)
... <snip> ...
~/.local/share/virtualenvs/bug-Ew6sNC7W/lib/python3.7/site-packages/pyproj/transformer.py in from_proj(proj_from, proj_to, skip_equivalent, always_xy)
pyproj/_transformer.pyx in pyproj._transformer._Transformer.from_crs()
ProjError: Error creating CRS to CRS.: (Internal Proj Error: proj_create: no dat
abase context specified)
In [2]:
Do you really want to exit ([y]/n)?
Error in atexit._run_exitfuncs:
Traceback (most recent call last):
File "/home/elias/.local/share/virtualenvs/bug-Ew6sNC7W/lib/python3.7/site-packages/IPython/core/history.py", line 578, in end_session
sqlite3.OperationalError: unable to open database file
|
ProjError
|
def get_data_dir():
"""
The order of preference for the data directory is:
1. The one set by pyproj.datadir.set_data_dir (if exists & valid)
2. The internal proj directory (if exists & valid)
3. The directory in PROJ_LIB (if exists & valid)
4. The directory on the PATH (if exists & valid)
Returns
-------
str: The valid data directory.
"""
global _USER_PROJ_DATA
internal_datadir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "proj_dir", "share", "proj"
)
proj_lib_dirs = os.environ.get("PROJ_LIB", "")
def valid_data_dir(potential_data_dir):
if potential_data_dir is not None and os.path.exists(
os.path.join(potential_data_dir, "proj.db")
):
return True
return False
def valid_data_dirs(potential_data_dirs):
if potential_data_dirs is None:
return False
for proj_data_dir in potential_data_dirs.split(os.pathsep):
if valid_data_dir(proj_data_dir):
return True
break
return None
validated_proj_data = None
if valid_data_dirs(_USER_PROJ_DATA):
validated_proj_data = _USER_PROJ_DATA
elif valid_data_dir(internal_datadir):
validated_proj_data = internal_datadir
elif valid_data_dirs(proj_lib_dirs):
validated_proj_data = proj_lib_dirs
else:
proj_exe = find_executable("proj")
if proj_exe is not None:
system_proj_dir = os.path.join(
os.path.dirname(os.path.dirname(proj_exe)), "share", "proj"
)
if valid_data_dir(system_proj_dir):
validated_proj_data = system_proj_dir
if validated_proj_data is None:
raise DataDirError(
"Valid PROJ data directory not found. "
"Either set the path using the environmental variable PROJ_LIB or "
"with `pyproj.datadir.set_data_dir`."
)
return validated_proj_data
|
def get_data_dir():
"""
The order of preference for the data directory is:
1. The one set by pyproj.datadir.set_data_dir (if exists & valid)
2. The internal proj directory (if exists & valid)
3. The directory in PROJ_LIB (if exists & valid)
4. The directory on the PATH (if exists & valid)
Returns
-------
str: The valid data directory.
"""
# to avoid re-validating
global _VALIDATED_PROJ_DATA
if _VALIDATED_PROJ_DATA is not None:
return _VALIDATED_PROJ_DATA
global _USER_PROJ_DATA
internal_datadir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "proj_dir", "share", "proj"
)
proj_lib_dirs = os.environ.get("PROJ_LIB", "")
def valid_data_dir(potential_data_dir):
if potential_data_dir is not None and os.path.exists(
os.path.join(potential_data_dir, "proj.db")
):
return True
return False
def valid_data_dirs(potential_data_dirs):
if potential_data_dirs is None:
return False
for proj_data_dir in potential_data_dirs.split(os.pathsep):
if valid_data_dir(proj_data_dir):
return True
break
return None
if valid_data_dirs(_USER_PROJ_DATA):
_VALIDATED_PROJ_DATA = _USER_PROJ_DATA
elif valid_data_dir(internal_datadir):
_VALIDATED_PROJ_DATA = internal_datadir
elif valid_data_dirs(proj_lib_dirs):
_VALIDATED_PROJ_DATA = proj_lib_dirs
else:
proj_exe = find_executable("proj")
if proj_exe is not None:
system_proj_dir = os.path.join(
os.path.dirname(os.path.dirname(proj_exe)), "share", "proj"
)
if valid_data_dir(system_proj_dir):
_VALIDATED_PROJ_DATA = system_proj_dir
if _VALIDATED_PROJ_DATA is None:
raise DataDirError(
"Valid PROJ data directory not found. "
"Either set the path using the environmental variable PROJ_LIB or "
"with `pyproj.datadir.set_data_dir`."
)
return _VALIDATED_PROJ_DATA
|
https://github.com/pyproj4/pyproj/issues/374
|
97%|█████████████████████████████████▊ | 88243/91210 [00:26<00:00, 6190.94it/s]
CRSs instantiated: 507
CRSs instantiated (cache hits included): 88603
Transformers instantiated: 502
Transformers instantiated (cache hits included): 88389
---------------------------------------------------------------------------
ProjError Traceback (most recent call last)
... <snip> ...
~/.local/share/virtualenvs/bug-Ew6sNC7W/lib/python3.7/site-packages/pyproj/transformer.py in from_proj(proj_from, proj_to, skip_equivalent, always_xy)
pyproj/_transformer.pyx in pyproj._transformer._Transformer.from_crs()
ProjError: Error creating CRS to CRS.: (Internal Proj Error: proj_create: no dat
abase context specified)
In [2]:
Do you really want to exit ([y]/n)?
Error in atexit._run_exitfuncs:
Traceback (most recent call last):
File "/home/elias/.local/share/virtualenvs/bug-Ew6sNC7W/lib/python3.7/site-packages/IPython/core/history.py", line 578, in end_session
sqlite3.OperationalError: unable to open database file
|
ProjError
|
def from_proj(proj_from, proj_to, skip_equivalent=False, always_xy=False):
"""Make a Transformer from a :obj:`~pyproj.proj.Proj` or input used to create one.
Parameters
----------
proj_from: :obj:`~pyproj.proj.Proj` or input used to create one
Projection of input data.
proj_to: :obj:`~pyproj.proj.Proj` or input used to create one
Projection of output data.
skip_equivalent: bool, optional
If true, will skip the transformation operation if input and output
projections are equivalent. Default is false.
always_xy: bool, optional
If true, the transform method will accept as input and return as output
coordinates using the traditional GIS order, that is longitude, latitude
for geographic CRS and easting, northing for most projected CRS.
Default is false.
Returns
-------
:obj:`~Transformer`
"""
if not isinstance(proj_from, Proj):
proj_from = Proj(proj_from)
if not isinstance(proj_to, Proj):
proj_to = Proj(proj_to)
return Transformer(
_Transformer.from_crs(
proj_from.crs,
proj_to.crs,
skip_equivalent=skip_equivalent,
always_xy=always_xy,
)
)
|
def from_proj(proj_from, proj_to, skip_equivalent=False, always_xy=False):
"""Make a Transformer from a :obj:`~pyproj.proj.Proj` or input used to create one.
Parameters
----------
proj_from: :obj:`~pyproj.proj.Proj` or input used to create one
Projection of input data.
proj_to: :obj:`~pyproj.proj.Proj` or input used to create one
Projection of output data.
skip_equivalent: bool, optional
If true, will skip the transformation operation if input and output
projections are equivalent. Default is false.
always_xy: bool, optional
If true, the transform method will accept as input and return as output
coordinates using the traditional GIS order, that is longitude, latitude
for geographic CRS and easting, northing for most projected CRS.
Default is false.
Returns
-------
:obj:`~Transformer`
"""
if not isinstance(proj_from, Proj):
proj_from = Proj(proj_from)
if not isinstance(proj_to, Proj):
proj_to = Proj(proj_to)
transformer = Transformer()
transformer._transformer = _Transformer.from_crs(
proj_from.crs,
proj_to.crs,
skip_equivalent=skip_equivalent,
always_xy=always_xy,
)
return transformer
|
https://github.com/pyproj4/pyproj/issues/321
|
In [4]: t = pyproj.Transformer()
In [5]: t
Out[5]: <pyproj.transformer.Transformer at 0x7fd75ff9b860>
In [6]: t.transform(0, 0)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-65405fa99360> in <module>
----> 1 t.transform(0, 0)
~/scipy/repos/pyproj/pyproj/transformer.py in transform(self, xx, yy, zz, tt, radians, errcheck, direction)
207 intime = None
208 # call pj_transform. inx,iny,inz buffers modified in place.
--> 209 self._transformer._transform(
210 inx,
211 iny,
AttributeError: 'Transformer' object has no attribute '_transformer'
|
AttributeError
|
def from_crs(crs_from, crs_to, skip_equivalent=False, always_xy=False):
"""Make a Transformer from a :obj:`~pyproj.crs.CRS` or input used to create one.
Parameters
----------
crs_from: ~pyproj.crs.CRS or input used to create one
Projection of input data.
crs_to: ~pyproj.crs.CRS or input used to create one
Projection of output data.
skip_equivalent: bool, optional
If true, will skip the transformation operation if input and output
projections are equivalent. Default is false.
always_xy: bool, optional
If true, the transform method will accept as input and return as output
coordinates using the traditional GIS order, that is longitude, latitude
for geographic CRS and easting, northing for most projected CRS.
Default is false.
Returns
-------
:obj:`~Transformer`
"""
transformer = Transformer(
_Transformer.from_crs(
CRS.from_user_input(crs_from),
CRS.from_user_input(crs_to),
skip_equivalent=skip_equivalent,
always_xy=always_xy,
)
)
return transformer
|
def from_crs(crs_from, crs_to, skip_equivalent=False, always_xy=False):
"""Make a Transformer from a :obj:`~pyproj.crs.CRS` or input used to create one.
Parameters
----------
crs_from: ~pyproj.crs.CRS or input used to create one
Projection of input data.
crs_to: ~pyproj.crs.CRS or input used to create one
Projection of output data.
skip_equivalent: bool, optional
If true, will skip the transformation operation if input and output
projections are equivalent. Default is false.
always_xy: bool, optional
If true, the transform method will accept as input and return as output
coordinates using the traditional GIS order, that is longitude, latitude
for geographic CRS and easting, northing for most projected CRS.
Default is false.
Returns
-------
:obj:`~Transformer`
"""
transformer = Transformer()
transformer._transformer = _Transformer.from_crs(
CRS.from_user_input(crs_from),
CRS.from_user_input(crs_to),
skip_equivalent=skip_equivalent,
always_xy=always_xy,
)
return transformer
|
https://github.com/pyproj4/pyproj/issues/321
|
In [4]: t = pyproj.Transformer()
In [5]: t
Out[5]: <pyproj.transformer.Transformer at 0x7fd75ff9b860>
In [6]: t.transform(0, 0)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-65405fa99360> in <module>
----> 1 t.transform(0, 0)
~/scipy/repos/pyproj/pyproj/transformer.py in transform(self, xx, yy, zz, tt, radians, errcheck, direction)
207 intime = None
208 # call pj_transform. inx,iny,inz buffers modified in place.
--> 209 self._transformer._transform(
210 inx,
211 iny,
AttributeError: 'Transformer' object has no attribute '_transformer'
|
AttributeError
|
def from_pipeline(proj_pipeline):
"""Make a Transformer from a PROJ pipeline string.
https://proj4.org/operations/pipeline.html
Parameters
----------
proj_pipeline: str
Projection pipeline string.
Returns
-------
~Transformer
"""
return Transformer(_Transformer.from_pipeline(cstrencode(proj_pipeline)))
|
def from_pipeline(proj_pipeline):
"""Make a Transformer from a PROJ pipeline string.
https://proj4.org/operations/pipeline.html
Parameters
----------
proj_pipeline: str
Projection pipeline string.
Returns
-------
~Transformer
"""
transformer = Transformer()
transformer._transformer = _Transformer.from_pipeline(cstrencode(proj_pipeline))
return transformer
|
https://github.com/pyproj4/pyproj/issues/321
|
In [4]: t = pyproj.Transformer()
In [5]: t
Out[5]: <pyproj.transformer.Transformer at 0x7fd75ff9b860>
In [6]: t.transform(0, 0)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-65405fa99360> in <module>
----> 1 t.transform(0, 0)
~/scipy/repos/pyproj/pyproj/transformer.py in transform(self, xx, yy, zz, tt, radians, errcheck, direction)
207 intime = None
208 # call pj_transform. inx,iny,inz buffers modified in place.
--> 209 self._transformer._transform(
210 inx,
211 iny,
AttributeError: 'Transformer' object has no attribute '_transformer'
|
AttributeError
|
def _dict2string(projparams):
# convert a dict to a proj4 string.
pjargs = []
proj_inserted = False
for key, value in projparams.items():
# the towgs84 as list
if isinstance(value, (list, tuple)):
value = ",".join([str(val) for val in value])
# issue 183 (+ no_rot)
if value is None or value is True:
pjargs.append("+{key}".format(key=key))
elif value is False:
pass
# make sure string starts with proj or init
elif not proj_inserted and key in ("init", "proj"):
pjargs.insert(0, "+{key}={value}".format(key=key, value=value))
proj_inserted = True
else:
pjargs.append("+{key}={value}".format(key=key, value=value))
return " ".join(pjargs)
|
def _dict2string(projparams):
# convert a dict to a proj4 string.
pjargs = []
for key, value in projparams.items():
# the towgs84 as list
if isinstance(value, (list, tuple)):
value = ",".join([str(val) for val in value])
# issue 183 (+ no_rot)
if value is None or value is True:
pjargs.append("+" + key + " ")
elif value is False:
pass
else:
pjargs.append("+" + key + "=" + str(value) + " ")
return "".join(pjargs)
|
https://github.com/pyproj4/pyproj/issues/270
|
from pyproj import Proj
Proj({'a': 6371229.0, 'b': 6371229.0, 'lon_0': -10.0, 'o_lat_p': 30.0, 'o_lon_p': 0.0, 'o_proj': 'longlat', 'proj'
: 'ob_tran'})
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File ".../lib/python3.7/site-packages/pyproj/proj.py", line 303, in __init__
cstrencode(self.crs.to_proj4().replace("+type=crs", "").strip())
AttributeError: 'NoneType' object has no attribute 'replace'
|
AttributeError
|
def __init__(self, projparams=None, preserve_units=True, **kwargs):
"""
initialize a Proj class instance.
See the proj documentation (https://github.com/OSGeo/proj.4/wiki)
for more information about projection parameters.
Parameters
----------
projparams: int, str, dict, pyproj.CRS
A proj.4 or WKT string, proj.4 dict, EPSG integer, or a pyproj.CRS instnace.
preserve_units: bool
If false, will ensure +units=m.
**kwargs:
proj.4 projection parameters.
Example usage:
>>> from pyproj import Proj
>>> p = Proj(proj='utm',zone=10,ellps='WGS84', preserve_units=False) # use kwargs
>>> x,y = p(-120.108, 34.36116666)
>>> 'x=%9.3f y=%11.3f' % (x,y)
'x=765975.641 y=3805993.134'
>>> 'lon=%8.3f lat=%5.3f' % p(x,y,inverse=True)
'lon=-120.108 lat=34.361'
>>> # do 3 cities at a time in a tuple (Fresno, LA, SF)
>>> lons = (-119.72,-118.40,-122.38)
>>> lats = (36.77, 33.93, 37.62 )
>>> x,y = p(lons, lats)
>>> 'x: %9.3f %9.3f %9.3f' % x
'x: 792763.863 925321.537 554714.301'
>>> 'y: %9.3f %9.3f %9.3f' % y
'y: 4074377.617 3763936.941 4163835.303'
>>> lons, lats = p(x, y, inverse=True) # inverse transform
>>> 'lons: %8.3f %8.3f %8.3f' % lons
'lons: -119.720 -118.400 -122.380'
>>> 'lats: %8.3f %8.3f %8.3f' % lats
'lats: 36.770 33.930 37.620'
>>> p2 = Proj('+proj=utm +zone=10 +ellps=WGS84', preserve_units=False) # use proj4 string
>>> x,y = p2(-120.108, 34.36116666)
>>> 'x=%9.3f y=%11.3f' % (x,y)
'x=765975.641 y=3805993.134'
>>> p = Proj(init="epsg:32667", preserve_units=False)
>>> 'x=%12.3f y=%12.3f (meters)' % p(-114.057222, 51.045)
'x=-1783506.250 y= 6193827.033 (meters)'
>>> p = Proj("+init=epsg:32667")
>>> 'x=%12.3f y=%12.3f (feet)' % p(-114.057222, 51.045)
'x=-5851386.754 y=20320914.191 (feet)'
>>> # test data with radian inputs
>>> p1 = Proj(init="epsg:4214")
>>> x1, y1 = p1(116.366, 39.867)
>>> '{:.3f} {:.3f}'.format(x1, y1)
'2.031 0.696'
>>> x2, y2 = p1(x1, y1, inverse=True)
>>> '{:.3f} {:.3f}'.format(x2, y2)
'116.366 39.867'
"""
self.crs = CRS.from_user_input(projparams if projparams is not None else kwargs)
# make sure units are meters if preserve_units is False.
if not preserve_units and "foot" in self.crs.axis_info[0].unit_name:
projstring = self.crs.to_proj4(4)
projstring = re.sub(r"\s\+units=[\w-]+", "", projstring)
projstring += " +units=m"
self.crs = CRS(projstring)
super(Proj, self).__init__(
cstrencode(
(self.crs.to_proj4() or self.crs.srs).replace("+type=crs", "").strip()
)
)
|
def __init__(self, projparams=None, preserve_units=True, **kwargs):
"""
initialize a Proj class instance.
See the proj documentation (https://github.com/OSGeo/proj.4/wiki)
for more information about projection parameters.
Parameters
----------
projparams: int, str, dict, pyproj.CRS
A proj.4 or WKT string, proj.4 dict, EPSG integer, or a pyproj.CRS instnace.
preserve_units: bool
If false, will ensure +units=m.
**kwargs:
proj.4 projection parameters.
Example usage:
>>> from pyproj import Proj
>>> p = Proj(proj='utm',zone=10,ellps='WGS84', preserve_units=False) # use kwargs
>>> x,y = p(-120.108, 34.36116666)
>>> 'x=%9.3f y=%11.3f' % (x,y)
'x=765975.641 y=3805993.134'
>>> 'lon=%8.3f lat=%5.3f' % p(x,y,inverse=True)
'lon=-120.108 lat=34.361'
>>> # do 3 cities at a time in a tuple (Fresno, LA, SF)
>>> lons = (-119.72,-118.40,-122.38)
>>> lats = (36.77, 33.93, 37.62 )
>>> x,y = p(lons, lats)
>>> 'x: %9.3f %9.3f %9.3f' % x
'x: 792763.863 925321.537 554714.301'
>>> 'y: %9.3f %9.3f %9.3f' % y
'y: 4074377.617 3763936.941 4163835.303'
>>> lons, lats = p(x, y, inverse=True) # inverse transform
>>> 'lons: %8.3f %8.3f %8.3f' % lons
'lons: -119.720 -118.400 -122.380'
>>> 'lats: %8.3f %8.3f %8.3f' % lats
'lats: 36.770 33.930 37.620'
>>> p2 = Proj('+proj=utm +zone=10 +ellps=WGS84', preserve_units=False) # use proj4 string
>>> x,y = p2(-120.108, 34.36116666)
>>> 'x=%9.3f y=%11.3f' % (x,y)
'x=765975.641 y=3805993.134'
>>> p = Proj(init="epsg:32667", preserve_units=False)
>>> 'x=%12.3f y=%12.3f (meters)' % p(-114.057222, 51.045)
'x=-1783506.250 y= 6193827.033 (meters)'
>>> p = Proj("+init=epsg:32667")
>>> 'x=%12.3f y=%12.3f (feet)' % p(-114.057222, 51.045)
'x=-5851386.754 y=20320914.191 (feet)'
>>> # test data with radian inputs
>>> p1 = Proj(init="epsg:4214")
>>> x1, y1 = p1(116.366, 39.867)
>>> '{:.3f} {:.3f}'.format(x1, y1)
'2.031 0.696'
>>> x2, y2 = p1(x1, y1, inverse=True)
>>> '{:.3f} {:.3f}'.format(x2, y2)
'116.366 39.867'
"""
self.crs = CRS.from_user_input(projparams if projparams is not None else kwargs)
# make sure units are meters if preserve_units is False.
if not preserve_units and "foot" in self.crs.axis_info[0].unit_name:
projstring = self.crs.to_proj4(4)
projstring = re.sub(r"\s\+units=[\w-]+", "", projstring)
projstring += " +units=m"
self.crs = CRS(projstring)
super(Proj, self).__init__(
cstrencode(self.crs.to_proj4().replace("+type=crs", "").strip())
)
|
https://github.com/pyproj4/pyproj/issues/270
|
from pyproj import Proj
Proj({'a': 6371229.0, 'b': 6371229.0, 'lon_0': -10.0, 'o_lat_p': 30.0, 'o_lon_p': 0.0, 'o_proj': 'longlat', 'proj'
: 'ob_tran'})
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File ".../lib/python3.7/site-packages/pyproj/proj.py", line 303, in __init__
cstrencode(self.crs.to_proj4().replace("+type=crs", "").strip())
AttributeError: 'NoneType' object has no attribute 'replace'
|
AttributeError
|
def Kuf_conv_patch(inducing_variable, kernel, Xnew):
Xp = kernel.get_patches(Xnew) # [N, num_patches, patch_len]
bigKzx = kernel.base_kernel.K(
inducing_variable.Z, Xp
) # [M, N, P] -- thanks to broadcasting of kernels
Kzx = tf.reduce_sum(
bigKzx * kernel.weights if hasattr(kernel, "weights") else bigKzx, [2]
)
return Kzx / kernel.num_patches
|
def Kuf_conv_patch(feat, kern, Xnew):
Xp = kern.get_patches(Xnew) # [N, num_patches, patch_len]
bigKzx = kern.base_kernel.K(
feat.Z, Xp
) # [M, N, P] -- thanks to broadcasting of kernels
Kzx = tf.reduce_sum(
bigKzx * kern.weights if hasattr(kern, "weights") else bigKzx, [2]
)
return Kzx / kern.num_patches
|
https://github.com/GPflow/GPflow/issues/1578
|
TypeError Traceback (most recent call last)
<ipython-input-24-9a082736eedc> in <module>
38
39
---> 40 optimization_step()
41
42
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
821 # This is the first call of __call__, so we have to initialize.
822 initializers = []
--> 823 self._initialize(args, kwds, add_initializers_to=initializers)
824 finally:
825 # At this point we know that the initialization is complete (or less
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
694 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
695 self._concrete_stateful_fn = (
--> 696 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
697 *args, **kwds))
698
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2853 args, kwargs = None, None
2854 with self._lock:
-> 2855 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2856 return graph_function
2857
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
3211
3212 self._function_cache.missed.add(call_context_key)
-> 3213 graph_function = self._create_graph_function(args, kwargs)
3214 self._function_cache.primary[cache_key] = graph_function
3215 return graph_function, args, kwargs
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3063 arg_names = base_arg_names + missing_arg_names
3064 graph_function = ConcreteFunction(
-> 3065 func_graph_module.func_graph_from_py_func(
3066 self._name,
3067 self._python_function,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
984 _, original_func = tf_decorator.unwrap(python_func)
985
--> 986 func_outputs = python_func(*func_args, **func_kwargs)
987
988 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
598 # __wrapped__ allows AutoGraph to swap in a converted function. We give
599 # the function a weak reference to itself to avoid a reference cycle.
--> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds)
601 weak_wrapped_fn = weakref.ref(wrapped_fn)
602
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
TypeError: in user code:
<ipython-input-24-9a082736eedc>:32 optimization_step *
optimizer.minimize(m.training_loss, m.trainable_variables)
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:374 minimize **
grads_and_vars = self._compute_gradients(
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:429 _compute_gradients
loss_value = loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/training_mixins.py:64 training_loss
return self._training_loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/model.py:57 _training_loss
return -(self.maximum_log_likelihood_objective(*args, **kwargs) + self.log_prior_density())
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:154 maximum_log_likelihood_objective
return self.elbo()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:164 elbo
num_inducing = len(self.inducing_variable)
TypeError: 'Tensor' object cannot be interpreted as an integer
|
TypeError
|
def Kuu_kernel_inducingpoints(
inducing_variable: InducingPoints, kernel: Kernel, *, jitter=0.0
):
Kzz = kernel(inducing_variable.Z)
Kzz += jitter * tf.eye(inducing_variable.num_inducing, dtype=Kzz.dtype)
return Kzz
|
def Kuu_kernel_inducingpoints(
inducing_variable: InducingPoints, kernel: Kernel, *, jitter=0.0
):
Kzz = kernel(inducing_variable.Z)
Kzz += jitter * tf.eye(len(inducing_variable), dtype=Kzz.dtype)
return Kzz
|
https://github.com/GPflow/GPflow/issues/1578
|
TypeError Traceback (most recent call last)
<ipython-input-24-9a082736eedc> in <module>
38
39
---> 40 optimization_step()
41
42
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
821 # This is the first call of __call__, so we have to initialize.
822 initializers = []
--> 823 self._initialize(args, kwds, add_initializers_to=initializers)
824 finally:
825 # At this point we know that the initialization is complete (or less
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
694 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
695 self._concrete_stateful_fn = (
--> 696 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
697 *args, **kwds))
698
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2853 args, kwargs = None, None
2854 with self._lock:
-> 2855 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2856 return graph_function
2857
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
3211
3212 self._function_cache.missed.add(call_context_key)
-> 3213 graph_function = self._create_graph_function(args, kwargs)
3214 self._function_cache.primary[cache_key] = graph_function
3215 return graph_function, args, kwargs
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3063 arg_names = base_arg_names + missing_arg_names
3064 graph_function = ConcreteFunction(
-> 3065 func_graph_module.func_graph_from_py_func(
3066 self._name,
3067 self._python_function,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
984 _, original_func = tf_decorator.unwrap(python_func)
985
--> 986 func_outputs = python_func(*func_args, **func_kwargs)
987
988 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
598 # __wrapped__ allows AutoGraph to swap in a converted function. We give
599 # the function a weak reference to itself to avoid a reference cycle.
--> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds)
601 weak_wrapped_fn = weakref.ref(wrapped_fn)
602
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
TypeError: in user code:
<ipython-input-24-9a082736eedc>:32 optimization_step *
optimizer.minimize(m.training_loss, m.trainable_variables)
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:374 minimize **
grads_and_vars = self._compute_gradients(
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:429 _compute_gradients
loss_value = loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/training_mixins.py:64 training_loss
return self._training_loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/model.py:57 _training_loss
return -(self.maximum_log_likelihood_objective(*args, **kwargs) + self.log_prior_density())
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:154 maximum_log_likelihood_objective
return self.elbo()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:164 elbo
num_inducing = len(self.inducing_variable)
TypeError: 'Tensor' object cannot be interpreted as an integer
|
TypeError
|
def Kuu_sqexp_multiscale(
inducing_variable: Multiscale, kernel: SquaredExponential, *, jitter=0.0
):
Zmu, Zlen = kernel.slice(inducing_variable.Z, inducing_variable.scales)
idlengthscales2 = tf.square(kernel.lengthscales + Zlen)
sc = tf.sqrt(
idlengthscales2[None, ...]
+ idlengthscales2[:, None, ...]
- kernel.lengthscales**2
)
d = inducing_variable._cust_square_dist(Zmu, Zmu, sc)
Kzz = kernel.variance * tf.exp(-d / 2) * tf.reduce_prod(kernel.lengthscales / sc, 2)
Kzz += jitter * tf.eye(inducing_variable.num_inducing, dtype=Kzz.dtype)
return Kzz
|
def Kuu_sqexp_multiscale(
inducing_variable: Multiscale, kernel: SquaredExponential, *, jitter=0.0
):
Zmu, Zlen = kernel.slice(inducing_variable.Z, inducing_variable.scales)
idlengthscales2 = tf.square(kernel.lengthscales + Zlen)
sc = tf.sqrt(
idlengthscales2[None, ...]
+ idlengthscales2[:, None, ...]
- kernel.lengthscales**2
)
d = inducing_variable._cust_square_dist(Zmu, Zmu, sc)
Kzz = kernel.variance * tf.exp(-d / 2) * tf.reduce_prod(kernel.lengthscales / sc, 2)
Kzz += jitter * tf.eye(len(inducing_variable), dtype=Kzz.dtype)
return Kzz
|
https://github.com/GPflow/GPflow/issues/1578
|
TypeError Traceback (most recent call last)
<ipython-input-24-9a082736eedc> in <module>
38
39
---> 40 optimization_step()
41
42
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
821 # This is the first call of __call__, so we have to initialize.
822 initializers = []
--> 823 self._initialize(args, kwds, add_initializers_to=initializers)
824 finally:
825 # At this point we know that the initialization is complete (or less
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
694 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
695 self._concrete_stateful_fn = (
--> 696 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
697 *args, **kwds))
698
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2853 args, kwargs = None, None
2854 with self._lock:
-> 2855 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2856 return graph_function
2857
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
3211
3212 self._function_cache.missed.add(call_context_key)
-> 3213 graph_function = self._create_graph_function(args, kwargs)
3214 self._function_cache.primary[cache_key] = graph_function
3215 return graph_function, args, kwargs
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3063 arg_names = base_arg_names + missing_arg_names
3064 graph_function = ConcreteFunction(
-> 3065 func_graph_module.func_graph_from_py_func(
3066 self._name,
3067 self._python_function,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
984 _, original_func = tf_decorator.unwrap(python_func)
985
--> 986 func_outputs = python_func(*func_args, **func_kwargs)
987
988 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
598 # __wrapped__ allows AutoGraph to swap in a converted function. We give
599 # the function a weak reference to itself to avoid a reference cycle.
--> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds)
601 weak_wrapped_fn = weakref.ref(wrapped_fn)
602
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
TypeError: in user code:
<ipython-input-24-9a082736eedc>:32 optimization_step *
optimizer.minimize(m.training_loss, m.trainable_variables)
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:374 minimize **
grads_and_vars = self._compute_gradients(
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:429 _compute_gradients
loss_value = loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/training_mixins.py:64 training_loss
return self._training_loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/model.py:57 _training_loss
return -(self.maximum_log_likelihood_objective(*args, **kwargs) + self.log_prior_density())
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:154 maximum_log_likelihood_objective
return self.elbo()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:164 elbo
num_inducing = len(self.inducing_variable)
TypeError: 'Tensor' object cannot be interpreted as an integer
|
TypeError
|
def Kuu_conv_patch(inducing_variable, kernel, jitter=0.0):
return kernel.base_kernel.K(inducing_variable.Z) + jitter * tf.eye(
inducing_variable.num_inducing, dtype=default_float()
)
|
def Kuu_conv_patch(feat, kern, jitter=0.0):
return kern.base_kernel.K(feat.Z) + jitter * tf.eye(
len(feat), dtype=default_float()
)
|
https://github.com/GPflow/GPflow/issues/1578
|
TypeError Traceback (most recent call last)
<ipython-input-24-9a082736eedc> in <module>
38
39
---> 40 optimization_step()
41
42
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
821 # This is the first call of __call__, so we have to initialize.
822 initializers = []
--> 823 self._initialize(args, kwds, add_initializers_to=initializers)
824 finally:
825 # At this point we know that the initialization is complete (or less
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
694 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
695 self._concrete_stateful_fn = (
--> 696 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
697 *args, **kwds))
698
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2853 args, kwargs = None, None
2854 with self._lock:
-> 2855 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2856 return graph_function
2857
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
3211
3212 self._function_cache.missed.add(call_context_key)
-> 3213 graph_function = self._create_graph_function(args, kwargs)
3214 self._function_cache.primary[cache_key] = graph_function
3215 return graph_function, args, kwargs
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3063 arg_names = base_arg_names + missing_arg_names
3064 graph_function = ConcreteFunction(
-> 3065 func_graph_module.func_graph_from_py_func(
3066 self._name,
3067 self._python_function,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
984 _, original_func = tf_decorator.unwrap(python_func)
985
--> 986 func_outputs = python_func(*func_args, **func_kwargs)
987
988 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
598 # __wrapped__ allows AutoGraph to swap in a converted function. We give
599 # the function a weak reference to itself to avoid a reference cycle.
--> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds)
601 weak_wrapped_fn = weakref.ref(wrapped_fn)
602
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
TypeError: in user code:
<ipython-input-24-9a082736eedc>:32 optimization_step *
optimizer.minimize(m.training_loss, m.trainable_variables)
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:374 minimize **
grads_and_vars = self._compute_gradients(
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:429 _compute_gradients
loss_value = loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/training_mixins.py:64 training_loss
return self._training_loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/model.py:57 _training_loss
return -(self.maximum_log_likelihood_objective(*args, **kwargs) + self.log_prior_density())
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:154 maximum_log_likelihood_objective
return self.elbo()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:164 elbo
num_inducing = len(self.inducing_variable)
TypeError: 'Tensor' object cannot be interpreted as an integer
|
TypeError
|
def _Kuu(
inducing_variable: FallbackSeparateIndependentInducingVariables,
kernel: Union[SeparateIndependent, LinearCoregionalization],
*,
jitter=0.0,
):
Kmms = [
Kuu(f, k)
for f, k in zip(inducing_variable.inducing_variable_list, kernel.kernels)
]
Kmm = tf.stack(Kmms, axis=0) # [L, M, M]
jittermat = (
tf.eye(inducing_variable.num_inducing, dtype=Kmm.dtype)[None, :, :] * jitter
)
return Kmm + jittermat
|
def _Kuu(
inducing_variable: FallbackSeparateIndependentInducingVariables,
kernel: Union[SeparateIndependent, LinearCoregionalization],
*,
jitter=0.0,
):
Kmms = [
Kuu(f, k)
for f, k in zip(inducing_variable.inducing_variable_list, kernel.kernels)
]
Kmm = tf.stack(Kmms, axis=0) # [L, M, M]
jittermat = tf.eye(len(inducing_variable), dtype=Kmm.dtype)[None, :, :] * jitter
return Kmm + jittermat
|
https://github.com/GPflow/GPflow/issues/1578
|
TypeError Traceback (most recent call last)
<ipython-input-24-9a082736eedc> in <module>
38
39
---> 40 optimization_step()
41
42
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
821 # This is the first call of __call__, so we have to initialize.
822 initializers = []
--> 823 self._initialize(args, kwds, add_initializers_to=initializers)
824 finally:
825 # At this point we know that the initialization is complete (or less
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
694 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
695 self._concrete_stateful_fn = (
--> 696 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
697 *args, **kwds))
698
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2853 args, kwargs = None, None
2854 with self._lock:
-> 2855 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2856 return graph_function
2857
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
3211
3212 self._function_cache.missed.add(call_context_key)
-> 3213 graph_function = self._create_graph_function(args, kwargs)
3214 self._function_cache.primary[cache_key] = graph_function
3215 return graph_function, args, kwargs
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3063 arg_names = base_arg_names + missing_arg_names
3064 graph_function = ConcreteFunction(
-> 3065 func_graph_module.func_graph_from_py_func(
3066 self._name,
3067 self._python_function,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
984 _, original_func = tf_decorator.unwrap(python_func)
985
--> 986 func_outputs = python_func(*func_args, **func_kwargs)
987
988 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
598 # __wrapped__ allows AutoGraph to swap in a converted function. We give
599 # the function a weak reference to itself to avoid a reference cycle.
--> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds)
601 weak_wrapped_fn = weakref.ref(wrapped_fn)
602
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
TypeError: in user code:
<ipython-input-24-9a082736eedc>:32 optimization_step *
optimizer.minimize(m.training_loss, m.trainable_variables)
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:374 minimize **
grads_and_vars = self._compute_gradients(
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:429 _compute_gradients
loss_value = loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/training_mixins.py:64 training_loss
return self._training_loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/model.py:57 _training_loss
return -(self.maximum_log_likelihood_objective(*args, **kwargs) + self.log_prior_density())
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:154 maximum_log_likelihood_objective
return self.elbo()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:164 elbo
num_inducing = len(self.inducing_variable)
TypeError: 'Tensor' object cannot be interpreted as an integer
|
TypeError
|
def __init__(self, Z: TensorData, name: Optional[str] = None):
"""
:param Z: the initial positions of the inducing points, size [M, D]
"""
super().__init__(name=name)
if not isinstance(Z, (tf.Variable, tfp.util.TransformedVariable)):
Z = Parameter(Z)
self.Z = Z
|
def __init__(self, Z: TensorData, name: Optional[str] = None):
"""
:param Z: the initial positions of the inducing points, size [M, D]
"""
super().__init__(name=name)
self.Z = Parameter(Z, dtype=default_float())
|
https://github.com/GPflow/GPflow/issues/1578
|
TypeError Traceback (most recent call last)
<ipython-input-24-9a082736eedc> in <module>
38
39
---> 40 optimization_step()
41
42
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
821 # This is the first call of __call__, so we have to initialize.
822 initializers = []
--> 823 self._initialize(args, kwds, add_initializers_to=initializers)
824 finally:
825 # At this point we know that the initialization is complete (or less
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
694 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
695 self._concrete_stateful_fn = (
--> 696 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
697 *args, **kwds))
698
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2853 args, kwargs = None, None
2854 with self._lock:
-> 2855 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2856 return graph_function
2857
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
3211
3212 self._function_cache.missed.add(call_context_key)
-> 3213 graph_function = self._create_graph_function(args, kwargs)
3214 self._function_cache.primary[cache_key] = graph_function
3215 return graph_function, args, kwargs
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3063 arg_names = base_arg_names + missing_arg_names
3064 graph_function = ConcreteFunction(
-> 3065 func_graph_module.func_graph_from_py_func(
3066 self._name,
3067 self._python_function,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
984 _, original_func = tf_decorator.unwrap(python_func)
985
--> 986 func_outputs = python_func(*func_args, **func_kwargs)
987
988 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
598 # __wrapped__ allows AutoGraph to swap in a converted function. We give
599 # the function a weak reference to itself to avoid a reference cycle.
--> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds)
601 weak_wrapped_fn = weakref.ref(wrapped_fn)
602
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
TypeError: in user code:
<ipython-input-24-9a082736eedc>:32 optimization_step *
optimizer.minimize(m.training_loss, m.trainable_variables)
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:374 minimize **
grads_and_vars = self._compute_gradients(
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:429 _compute_gradients
loss_value = loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/training_mixins.py:64 training_loss
return self._training_loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/model.py:57 _training_loss
return -(self.maximum_log_likelihood_objective(*args, **kwargs) + self.log_prior_density())
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:154 maximum_log_likelihood_objective
return self.elbo()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:164 elbo
num_inducing = len(self.inducing_variable)
TypeError: 'Tensor' object cannot be interpreted as an integer
|
TypeError
|
def __len__(self) -> int:
return tf.shape(self.Z)[0]
|
def __len__(self) -> int:
return self.Z.shape[0]
|
https://github.com/GPflow/GPflow/issues/1578
|
TypeError Traceback (most recent call last)
<ipython-input-24-9a082736eedc> in <module>
38
39
---> 40 optimization_step()
41
42
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
821 # This is the first call of __call__, so we have to initialize.
822 initializers = []
--> 823 self._initialize(args, kwds, add_initializers_to=initializers)
824 finally:
825 # At this point we know that the initialization is complete (or less
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
694 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
695 self._concrete_stateful_fn = (
--> 696 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
697 *args, **kwds))
698
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2853 args, kwargs = None, None
2854 with self._lock:
-> 2855 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2856 return graph_function
2857
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
3211
3212 self._function_cache.missed.add(call_context_key)
-> 3213 graph_function = self._create_graph_function(args, kwargs)
3214 self._function_cache.primary[cache_key] = graph_function
3215 return graph_function, args, kwargs
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3063 arg_names = base_arg_names + missing_arg_names
3064 graph_function = ConcreteFunction(
-> 3065 func_graph_module.func_graph_from_py_func(
3066 self._name,
3067 self._python_function,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
984 _, original_func = tf_decorator.unwrap(python_func)
985
--> 986 func_outputs = python_func(*func_args, **func_kwargs)
987
988 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
598 # __wrapped__ allows AutoGraph to swap in a converted function. We give
599 # the function a weak reference to itself to avoid a reference cycle.
--> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds)
601 weak_wrapped_fn = weakref.ref(wrapped_fn)
602
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
TypeError: in user code:
<ipython-input-24-9a082736eedc>:32 optimization_step *
optimizer.minimize(m.training_loss, m.trainable_variables)
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:374 minimize **
grads_and_vars = self._compute_gradients(
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:429 _compute_gradients
loss_value = loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/training_mixins.py:64 training_loss
return self._training_loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/model.py:57 _training_loss
return -(self.maximum_log_likelihood_objective(*args, **kwargs) + self.log_prior_density())
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:154 maximum_log_likelihood_objective
return self.elbo()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:164 elbo
num_inducing = len(self.inducing_variable)
TypeError: 'Tensor' object cannot be interpreted as an integer
|
TypeError
|
def __len__(self) -> int:
return self.inducing_variable.num_inducing
|
def __len__(self) -> int:
return len(self.inducing_variable)
|
https://github.com/GPflow/GPflow/issues/1578
|
TypeError Traceback (most recent call last)
<ipython-input-24-9a082736eedc> in <module>
38
39
---> 40 optimization_step()
41
42
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
821 # This is the first call of __call__, so we have to initialize.
822 initializers = []
--> 823 self._initialize(args, kwds, add_initializers_to=initializers)
824 finally:
825 # At this point we know that the initialization is complete (or less
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
694 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
695 self._concrete_stateful_fn = (
--> 696 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
697 *args, **kwds))
698
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2853 args, kwargs = None, None
2854 with self._lock:
-> 2855 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2856 return graph_function
2857
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
3211
3212 self._function_cache.missed.add(call_context_key)
-> 3213 graph_function = self._create_graph_function(args, kwargs)
3214 self._function_cache.primary[cache_key] = graph_function
3215 return graph_function, args, kwargs
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3063 arg_names = base_arg_names + missing_arg_names
3064 graph_function = ConcreteFunction(
-> 3065 func_graph_module.func_graph_from_py_func(
3066 self._name,
3067 self._python_function,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
984 _, original_func = tf_decorator.unwrap(python_func)
985
--> 986 func_outputs = python_func(*func_args, **func_kwargs)
987
988 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
598 # __wrapped__ allows AutoGraph to swap in a converted function. We give
599 # the function a weak reference to itself to avoid a reference cycle.
--> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds)
601 weak_wrapped_fn = weakref.ref(wrapped_fn)
602
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
TypeError: in user code:
<ipython-input-24-9a082736eedc>:32 optimization_step *
optimizer.minimize(m.training_loss, m.trainable_variables)
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:374 minimize **
grads_and_vars = self._compute_gradients(
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:429 _compute_gradients
loss_value = loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/training_mixins.py:64 training_loss
return self._training_loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/model.py:57 _training_loss
return -(self.maximum_log_likelihood_objective(*args, **kwargs) + self.log_prior_density())
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:154 maximum_log_likelihood_objective
return self.elbo()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:164 elbo
num_inducing = len(self.inducing_variable)
TypeError: 'Tensor' object cannot be interpreted as an integer
|
TypeError
|
def __len__(self) -> int:
# TODO(st--) we should check that they all have the same length...
return self.inducing_variable_list[0].num_inducing
|
def __len__(self) -> int:
return len(self.inducing_variable_list[0])
|
https://github.com/GPflow/GPflow/issues/1578
|
TypeError Traceback (most recent call last)
<ipython-input-24-9a082736eedc> in <module>
38
39
---> 40 optimization_step()
41
42
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
821 # This is the first call of __call__, so we have to initialize.
822 initializers = []
--> 823 self._initialize(args, kwds, add_initializers_to=initializers)
824 finally:
825 # At this point we know that the initialization is complete (or less
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
694 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
695 self._concrete_stateful_fn = (
--> 696 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
697 *args, **kwds))
698
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2853 args, kwargs = None, None
2854 with self._lock:
-> 2855 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2856 return graph_function
2857
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
3211
3212 self._function_cache.missed.add(call_context_key)
-> 3213 graph_function = self._create_graph_function(args, kwargs)
3214 self._function_cache.primary[cache_key] = graph_function
3215 return graph_function, args, kwargs
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3063 arg_names = base_arg_names + missing_arg_names
3064 graph_function = ConcreteFunction(
-> 3065 func_graph_module.func_graph_from_py_func(
3066 self._name,
3067 self._python_function,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
984 _, original_func = tf_decorator.unwrap(python_func)
985
--> 986 func_outputs = python_func(*func_args, **func_kwargs)
987
988 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
598 # __wrapped__ allows AutoGraph to swap in a converted function. We give
599 # the function a weak reference to itself to avoid a reference cycle.
--> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds)
601 weak_wrapped_fn = weakref.ref(wrapped_fn)
602
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
TypeError: in user code:
<ipython-input-24-9a082736eedc>:32 optimization_step *
optimizer.minimize(m.training_loss, m.trainable_variables)
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:374 minimize **
grads_and_vars = self._compute_gradients(
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:429 _compute_gradients
loss_value = loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/training_mixins.py:64 training_loss
return self._training_loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/model.py:57 _training_loss
return -(self.maximum_log_likelihood_objective(*args, **kwargs) + self.log_prior_density())
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:154 maximum_log_likelihood_objective
return self.elbo()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:164 elbo
num_inducing = len(self.inducing_variable)
TypeError: 'Tensor' object cannot be interpreted as an integer
|
TypeError
|
def __init__(
self,
distribution_class: Type[tfp.distributions.Distribution] = tfp.distributions.Normal,
scale_transform: Optional[tfp.bijectors.Bijector] = None,
**kwargs,
):
"""
:param distribution_class: distribution class parameterized by `loc` and `scale`
as first and second argument, respectively.
:param scale_transform: callable/bijector applied to the latent
function modelling the scale to ensure its positivity.
Typically, `tf.exp` or `tf.softplus`, but can be any function f: R -> R^+. Defaults to exp if not explicitly specified.
"""
if scale_transform is None:
scale_transform = positive(base="exp")
self.scale_transform = scale_transform
def conditional_distribution(Fs) -> tfp.distributions.Distribution:
tf.debugging.assert_equal(tf.shape(Fs)[-1], 2)
loc = Fs[..., :1]
scale = self.scale_transform(Fs[..., 1:])
return distribution_class(loc, scale)
super().__init__(
latent_dim=2,
conditional_distribution=conditional_distribution,
**kwargs,
)
|
def __init__(
self,
distribution_class: Type[tfp.distributions.Distribution] = tfp.distributions.Normal,
scale_transform: tfp.bijectors.Bijector = positive(base="exp"),
**kwargs,
):
"""
:param distribution_class: distribution class parameterized by `loc` and `scale`
as first and second argument, respectively.
:param scale_transform: callable/bijector applied to the latent
function modelling the scale to ensure its positivity.
Typically, `tf.exp` or `tf.softplus`, but can be any function f: R -> R^+.
"""
def conditional_distribution(Fs) -> tfp.distributions.Distribution:
tf.debugging.assert_equal(tf.shape(Fs)[-1], 2)
loc = Fs[..., :1]
scale = scale_transform(Fs[..., 1:])
return distribution_class(loc, scale)
super().__init__(
latent_dim=2,
conditional_distribution=conditional_distribution,
**kwargs,
)
|
https://github.com/GPflow/GPflow/issues/1578
|
TypeError Traceback (most recent call last)
<ipython-input-24-9a082736eedc> in <module>
38
39
---> 40 optimization_step()
41
42
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
821 # This is the first call of __call__, so we have to initialize.
822 initializers = []
--> 823 self._initialize(args, kwds, add_initializers_to=initializers)
824 finally:
825 # At this point we know that the initialization is complete (or less
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
694 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
695 self._concrete_stateful_fn = (
--> 696 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
697 *args, **kwds))
698
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2853 args, kwargs = None, None
2854 with self._lock:
-> 2855 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2856 return graph_function
2857
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
3211
3212 self._function_cache.missed.add(call_context_key)
-> 3213 graph_function = self._create_graph_function(args, kwargs)
3214 self._function_cache.primary[cache_key] = graph_function
3215 return graph_function, args, kwargs
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3063 arg_names = base_arg_names + missing_arg_names
3064 graph_function = ConcreteFunction(
-> 3065 func_graph_module.func_graph_from_py_func(
3066 self._name,
3067 self._python_function,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
984 _, original_func = tf_decorator.unwrap(python_func)
985
--> 986 func_outputs = python_func(*func_args, **func_kwargs)
987
988 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
598 # __wrapped__ allows AutoGraph to swap in a converted function. We give
599 # the function a weak reference to itself to avoid a reference cycle.
--> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds)
601 weak_wrapped_fn = weakref.ref(wrapped_fn)
602
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
TypeError: in user code:
<ipython-input-24-9a082736eedc>:32 optimization_step *
optimizer.minimize(m.training_loss, m.trainable_variables)
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:374 minimize **
grads_and_vars = self._compute_gradients(
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:429 _compute_gradients
loss_value = loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/training_mixins.py:64 training_loss
return self._training_loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/model.py:57 _training_loss
return -(self.maximum_log_likelihood_objective(*args, **kwargs) + self.log_prior_density())
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:154 maximum_log_likelihood_objective
return self.elbo()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:164 elbo
num_inducing = len(self.inducing_variable)
TypeError: 'Tensor' object cannot be interpreted as an integer
|
TypeError
|
def conditional_distribution(Fs) -> tfp.distributions.Distribution:
tf.debugging.assert_equal(tf.shape(Fs)[-1], 2)
loc = Fs[..., :1]
scale = self.scale_transform(Fs[..., 1:])
return distribution_class(loc, scale)
|
def conditional_distribution(Fs) -> tfp.distributions.Distribution:
tf.debugging.assert_equal(tf.shape(Fs)[-1], 2)
loc = Fs[..., :1]
scale = scale_transform(Fs[..., 1:])
return distribution_class(loc, scale)
|
https://github.com/GPflow/GPflow/issues/1578
|
TypeError Traceback (most recent call last)
<ipython-input-24-9a082736eedc> in <module>
38
39
---> 40 optimization_step()
41
42
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
821 # This is the first call of __call__, so we have to initialize.
822 initializers = []
--> 823 self._initialize(args, kwds, add_initializers_to=initializers)
824 finally:
825 # At this point we know that the initialization is complete (or less
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
694 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
695 self._concrete_stateful_fn = (
--> 696 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
697 *args, **kwds))
698
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2853 args, kwargs = None, None
2854 with self._lock:
-> 2855 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2856 return graph_function
2857
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
3211
3212 self._function_cache.missed.add(call_context_key)
-> 3213 graph_function = self._create_graph_function(args, kwargs)
3214 self._function_cache.primary[cache_key] = graph_function
3215 return graph_function, args, kwargs
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3063 arg_names = base_arg_names + missing_arg_names
3064 graph_function = ConcreteFunction(
-> 3065 func_graph_module.func_graph_from_py_func(
3066 self._name,
3067 self._python_function,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
984 _, original_func = tf_decorator.unwrap(python_func)
985
--> 986 func_outputs = python_func(*func_args, **func_kwargs)
987
988 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
598 # __wrapped__ allows AutoGraph to swap in a converted function. We give
599 # the function a weak reference to itself to avoid a reference cycle.
--> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds)
601 weak_wrapped_fn = weakref.ref(wrapped_fn)
602
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
TypeError: in user code:
<ipython-input-24-9a082736eedc>:32 optimization_step *
optimizer.minimize(m.training_loss, m.trainable_variables)
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:374 minimize **
grads_and_vars = self._compute_gradients(
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:429 _compute_gradients
loss_value = loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/training_mixins.py:64 training_loss
return self._training_loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/model.py:57 _training_loss
return -(self.maximum_log_likelihood_objective(*args, **kwargs) + self.log_prior_density())
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:154 maximum_log_likelihood_objective
return self.elbo()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:164 elbo
num_inducing = len(self.inducing_variable)
TypeError: 'Tensor' object cannot be interpreted as an integer
|
TypeError
|
def elbo(self) -> tf.Tensor:
"""
Construct a tensorflow function to compute the bound on the marginal
likelihood.
"""
Y_data = self.data
pX = DiagonalGaussian(self.X_data_mean, self.X_data_var)
num_inducing = self.inducing_variable.num_inducing
psi0 = tf.reduce_sum(expectation(pX, self.kernel))
psi1 = expectation(pX, (self.kernel, self.inducing_variable))
psi2 = tf.reduce_sum(
expectation(
pX,
(self.kernel, self.inducing_variable),
(self.kernel, self.inducing_variable),
),
axis=0,
)
cov_uu = covariances.Kuu(
self.inducing_variable, self.kernel, jitter=default_jitter()
)
L = tf.linalg.cholesky(cov_uu)
sigma2 = self.likelihood.variance
sigma = tf.sqrt(sigma2)
# Compute intermediate matrices
A = tf.linalg.triangular_solve(L, tf.transpose(psi1), lower=True) / sigma
tmp = tf.linalg.triangular_solve(L, psi2, lower=True)
AAT = tf.linalg.triangular_solve(L, tf.transpose(tmp), lower=True) / sigma2
B = AAT + tf.eye(num_inducing, dtype=default_float())
LB = tf.linalg.cholesky(B)
log_det_B = 2.0 * tf.reduce_sum(tf.math.log(tf.linalg.diag_part(LB)))
c = tf.linalg.triangular_solve(LB, tf.linalg.matmul(A, Y_data), lower=True) / sigma
# KL[q(x) || p(x)]
dX_data_var = (
self.X_data_var
if self.X_data_var.shape.ndims == 2
else tf.linalg.diag_part(self.X_data_var)
)
NQ = to_default_float(tf.size(self.X_data_mean))
D = to_default_float(tf.shape(Y_data)[1])
KL = -0.5 * tf.reduce_sum(tf.math.log(dX_data_var))
KL += 0.5 * tf.reduce_sum(tf.math.log(self.X_prior_var))
KL -= 0.5 * NQ
KL += 0.5 * tf.reduce_sum(
(tf.square(self.X_data_mean - self.X_prior_mean) + dX_data_var)
/ self.X_prior_var
)
# compute log marginal bound
ND = to_default_float(tf.size(Y_data))
bound = -0.5 * ND * tf.math.log(2 * np.pi * sigma2)
bound += -0.5 * D * log_det_B
bound += -0.5 * tf.reduce_sum(tf.square(Y_data)) / sigma2
bound += 0.5 * tf.reduce_sum(tf.square(c))
bound += (
-0.5
* D
* (tf.reduce_sum(psi0) / sigma2 - tf.reduce_sum(tf.linalg.diag_part(AAT)))
)
bound -= KL
return bound
|
def elbo(self) -> tf.Tensor:
"""
Construct a tensorflow function to compute the bound on the marginal
likelihood.
"""
Y_data = self.data
pX = DiagonalGaussian(self.X_data_mean, self.X_data_var)
num_inducing = len(self.inducing_variable)
psi0 = tf.reduce_sum(expectation(pX, self.kernel))
psi1 = expectation(pX, (self.kernel, self.inducing_variable))
psi2 = tf.reduce_sum(
expectation(
pX,
(self.kernel, self.inducing_variable),
(self.kernel, self.inducing_variable),
),
axis=0,
)
cov_uu = covariances.Kuu(
self.inducing_variable, self.kernel, jitter=default_jitter()
)
L = tf.linalg.cholesky(cov_uu)
sigma2 = self.likelihood.variance
sigma = tf.sqrt(sigma2)
# Compute intermediate matrices
A = tf.linalg.triangular_solve(L, tf.transpose(psi1), lower=True) / sigma
tmp = tf.linalg.triangular_solve(L, psi2, lower=True)
AAT = tf.linalg.triangular_solve(L, tf.transpose(tmp), lower=True) / sigma2
B = AAT + tf.eye(num_inducing, dtype=default_float())
LB = tf.linalg.cholesky(B)
log_det_B = 2.0 * tf.reduce_sum(tf.math.log(tf.linalg.diag_part(LB)))
c = tf.linalg.triangular_solve(LB, tf.linalg.matmul(A, Y_data), lower=True) / sigma
# KL[q(x) || p(x)]
dX_data_var = (
self.X_data_var
if self.X_data_var.shape.ndims == 2
else tf.linalg.diag_part(self.X_data_var)
)
NQ = to_default_float(tf.size(self.X_data_mean))
D = to_default_float(tf.shape(Y_data)[1])
KL = -0.5 * tf.reduce_sum(tf.math.log(dX_data_var))
KL += 0.5 * tf.reduce_sum(tf.math.log(self.X_prior_var))
KL -= 0.5 * NQ
KL += 0.5 * tf.reduce_sum(
(tf.square(self.X_data_mean - self.X_prior_mean) + dX_data_var)
/ self.X_prior_var
)
# compute log marginal bound
ND = to_default_float(tf.size(Y_data))
bound = -0.5 * ND * tf.math.log(2 * np.pi * sigma2)
bound += -0.5 * D * log_det_B
bound += -0.5 * tf.reduce_sum(tf.square(Y_data)) / sigma2
bound += 0.5 * tf.reduce_sum(tf.square(c))
bound += (
-0.5
* D
* (tf.reduce_sum(psi0) / sigma2 - tf.reduce_sum(tf.linalg.diag_part(AAT)))
)
bound -= KL
return bound
|
https://github.com/GPflow/GPflow/issues/1578
|
TypeError Traceback (most recent call last)
<ipython-input-24-9a082736eedc> in <module>
38
39
---> 40 optimization_step()
41
42
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
821 # This is the first call of __call__, so we have to initialize.
822 initializers = []
--> 823 self._initialize(args, kwds, add_initializers_to=initializers)
824 finally:
825 # At this point we know that the initialization is complete (or less
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
694 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
695 self._concrete_stateful_fn = (
--> 696 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
697 *args, **kwds))
698
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2853 args, kwargs = None, None
2854 with self._lock:
-> 2855 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2856 return graph_function
2857
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
3211
3212 self._function_cache.missed.add(call_context_key)
-> 3213 graph_function = self._create_graph_function(args, kwargs)
3214 self._function_cache.primary[cache_key] = graph_function
3215 return graph_function, args, kwargs
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3063 arg_names = base_arg_names + missing_arg_names
3064 graph_function = ConcreteFunction(
-> 3065 func_graph_module.func_graph_from_py_func(
3066 self._name,
3067 self._python_function,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
984 _, original_func = tf_decorator.unwrap(python_func)
985
--> 986 func_outputs = python_func(*func_args, **func_kwargs)
987
988 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
598 # __wrapped__ allows AutoGraph to swap in a converted function. We give
599 # the function a weak reference to itself to avoid a reference cycle.
--> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds)
601 weak_wrapped_fn = weakref.ref(wrapped_fn)
602
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
TypeError: in user code:
<ipython-input-24-9a082736eedc>:32 optimization_step *
optimizer.minimize(m.training_loss, m.trainable_variables)
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:374 minimize **
grads_and_vars = self._compute_gradients(
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:429 _compute_gradients
loss_value = loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/training_mixins.py:64 training_loss
return self._training_loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/model.py:57 _training_loss
return -(self.maximum_log_likelihood_objective(*args, **kwargs) + self.log_prior_density())
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:154 maximum_log_likelihood_objective
return self.elbo()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:164 elbo
num_inducing = len(self.inducing_variable)
TypeError: 'Tensor' object cannot be interpreted as an integer
|
TypeError
|
def predict_f(
self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False
) -> MeanAndVariance:
"""
Compute the mean and variance of the latent function at some new points.
Note that this is very similar to the SGPR prediction, for which
there are notes in the SGPR notebook.
Note: This model does not allow full output covariances.
:param Xnew: points at which to predict
"""
if full_output_cov:
raise NotImplementedError
pX = DiagonalGaussian(self.X_data_mean, self.X_data_var)
Y_data = self.data
num_inducing = self.inducing_variable.num_inducing
psi1 = expectation(pX, (self.kernel, self.inducing_variable))
psi2 = tf.reduce_sum(
expectation(
pX,
(self.kernel, self.inducing_variable),
(self.kernel, self.inducing_variable),
),
axis=0,
)
jitter = default_jitter()
Kus = covariances.Kuf(self.inducing_variable, self.kernel, Xnew)
sigma2 = self.likelihood.variance
sigma = tf.sqrt(sigma2)
L = tf.linalg.cholesky(
covariances.Kuu(self.inducing_variable, self.kernel, jitter=jitter)
)
A = tf.linalg.triangular_solve(L, tf.transpose(psi1), lower=True) / sigma
tmp = tf.linalg.triangular_solve(L, psi2, lower=True)
AAT = tf.linalg.triangular_solve(L, tf.transpose(tmp), lower=True) / sigma2
B = AAT + tf.eye(num_inducing, dtype=default_float())
LB = tf.linalg.cholesky(B)
c = tf.linalg.triangular_solve(LB, tf.linalg.matmul(A, Y_data), lower=True) / sigma
tmp1 = tf.linalg.triangular_solve(L, Kus, lower=True)
tmp2 = tf.linalg.triangular_solve(LB, tmp1, lower=True)
mean = tf.linalg.matmul(tmp2, c, transpose_a=True)
if full_cov:
var = (
self.kernel(Xnew)
+ tf.linalg.matmul(tmp2, tmp2, transpose_a=True)
- tf.linalg.matmul(tmp1, tmp1, transpose_a=True)
)
shape = tf.stack([1, 1, tf.shape(Y_data)[1]])
var = tf.tile(tf.expand_dims(var, 2), shape)
else:
var = (
self.kernel(Xnew, full_cov=False)
+ tf.reduce_sum(tf.square(tmp2), axis=0)
- tf.reduce_sum(tf.square(tmp1), axis=0)
)
shape = tf.stack([1, tf.shape(Y_data)[1]])
var = tf.tile(tf.expand_dims(var, 1), shape)
return mean + self.mean_function(Xnew), var
|
def predict_f(
self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False
) -> MeanAndVariance:
"""
Compute the mean and variance of the latent function at some new points.
Note that this is very similar to the SGPR prediction, for which
there are notes in the SGPR notebook.
Note: This model does not allow full output covariances.
:param Xnew: points at which to predict
"""
if full_output_cov:
raise NotImplementedError
pX = DiagonalGaussian(self.X_data_mean, self.X_data_var)
Y_data = self.data
num_inducing = len(self.inducing_variable)
psi1 = expectation(pX, (self.kernel, self.inducing_variable))
psi2 = tf.reduce_sum(
expectation(
pX,
(self.kernel, self.inducing_variable),
(self.kernel, self.inducing_variable),
),
axis=0,
)
jitter = default_jitter()
Kus = covariances.Kuf(self.inducing_variable, self.kernel, Xnew)
sigma2 = self.likelihood.variance
sigma = tf.sqrt(sigma2)
L = tf.linalg.cholesky(
covariances.Kuu(self.inducing_variable, self.kernel, jitter=jitter)
)
A = tf.linalg.triangular_solve(L, tf.transpose(psi1), lower=True) / sigma
tmp = tf.linalg.triangular_solve(L, psi2, lower=True)
AAT = tf.linalg.triangular_solve(L, tf.transpose(tmp), lower=True) / sigma2
B = AAT + tf.eye(num_inducing, dtype=default_float())
LB = tf.linalg.cholesky(B)
c = tf.linalg.triangular_solve(LB, tf.linalg.matmul(A, Y_data), lower=True) / sigma
tmp1 = tf.linalg.triangular_solve(L, Kus, lower=True)
tmp2 = tf.linalg.triangular_solve(LB, tmp1, lower=True)
mean = tf.linalg.matmul(tmp2, c, transpose_a=True)
if full_cov:
var = (
self.kernel(Xnew)
+ tf.linalg.matmul(tmp2, tmp2, transpose_a=True)
- tf.linalg.matmul(tmp1, tmp1, transpose_a=True)
)
shape = tf.stack([1, 1, tf.shape(Y_data)[1]])
var = tf.tile(tf.expand_dims(var, 2), shape)
else:
var = (
self.kernel(Xnew, full_cov=False)
+ tf.reduce_sum(tf.square(tmp2), axis=0)
- tf.reduce_sum(tf.square(tmp1), axis=0)
)
shape = tf.stack([1, tf.shape(Y_data)[1]])
var = tf.tile(tf.expand_dims(var, 1), shape)
return mean + self.mean_function(Xnew), var
|
https://github.com/GPflow/GPflow/issues/1578
|
TypeError Traceback (most recent call last)
<ipython-input-24-9a082736eedc> in <module>
38
39
---> 40 optimization_step()
41
42
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
821 # This is the first call of __call__, so we have to initialize.
822 initializers = []
--> 823 self._initialize(args, kwds, add_initializers_to=initializers)
824 finally:
825 # At this point we know that the initialization is complete (or less
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
694 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
695 self._concrete_stateful_fn = (
--> 696 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
697 *args, **kwds))
698
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2853 args, kwargs = None, None
2854 with self._lock:
-> 2855 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2856 return graph_function
2857
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
3211
3212 self._function_cache.missed.add(call_context_key)
-> 3213 graph_function = self._create_graph_function(args, kwargs)
3214 self._function_cache.primary[cache_key] = graph_function
3215 return graph_function, args, kwargs
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3063 arg_names = base_arg_names + missing_arg_names
3064 graph_function = ConcreteFunction(
-> 3065 func_graph_module.func_graph_from_py_func(
3066 self._name,
3067 self._python_function,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
984 _, original_func = tf_decorator.unwrap(python_func)
985
--> 986 func_outputs = python_func(*func_args, **func_kwargs)
987
988 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
598 # __wrapped__ allows AutoGraph to swap in a converted function. We give
599 # the function a weak reference to itself to avoid a reference cycle.
--> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds)
601 weak_wrapped_fn = weakref.ref(wrapped_fn)
602
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
TypeError: in user code:
<ipython-input-24-9a082736eedc>:32 optimization_step *
optimizer.minimize(m.training_loss, m.trainable_variables)
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:374 minimize **
grads_and_vars = self._compute_gradients(
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:429 _compute_gradients
loss_value = loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/training_mixins.py:64 training_loss
return self._training_loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/model.py:57 _training_loss
return -(self.maximum_log_likelihood_objective(*args, **kwargs) + self.log_prior_density())
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:154 maximum_log_likelihood_objective
return self.elbo()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:164 elbo
num_inducing = len(self.inducing_variable)
TypeError: 'Tensor' object cannot be interpreted as an integer
|
TypeError
|
def __init__(
self,
data: RegressionData,
kernel: Kernel,
likelihood: Likelihood,
mean_function: Optional[MeanFunction] = None,
num_latent_gps: Optional[int] = None,
inducing_variable: Optional[InducingPoints] = None,
):
"""
data is a tuple of X, Y with X, a data matrix, size [N, D] and Y, a data matrix, size [N, R]
Z is a data matrix, of inducing inputs, size [M, D]
kernel, likelihood, mean_function are appropriate GPflow objects
"""
if num_latent_gps is None:
num_latent_gps = self.calc_num_latent_gps_from_data(data, kernel, likelihood)
super().__init__(kernel, likelihood, mean_function, num_latent_gps=num_latent_gps)
self.data = data_input_to_tensor(data)
self.num_data = data[0].shape[0]
self.inducing_variable = inducingpoint_wrapper(inducing_variable)
self.V = Parameter(
np.zeros((self.inducing_variable.num_inducing, self.num_latent_gps))
)
self.V.prior = tfp.distributions.Normal(
loc=to_default_float(0.0), scale=to_default_float(1.0)
)
|
def __init__(
self,
data: RegressionData,
kernel: Kernel,
likelihood: Likelihood,
mean_function: Optional[MeanFunction] = None,
num_latent_gps: Optional[int] = None,
inducing_variable: Optional[InducingPoints] = None,
):
"""
data is a tuple of X, Y with X, a data matrix, size [N, D] and Y, a data matrix, size [N, R]
Z is a data matrix, of inducing inputs, size [M, D]
kernel, likelihood, mean_function are appropriate GPflow objects
"""
if num_latent_gps is None:
num_latent_gps = self.calc_num_latent_gps_from_data(data, kernel, likelihood)
super().__init__(kernel, likelihood, mean_function, num_latent_gps=num_latent_gps)
self.data = data_input_to_tensor(data)
self.num_data = data[0].shape[0]
self.inducing_variable = inducingpoint_wrapper(inducing_variable)
self.V = Parameter(np.zeros((len(self.inducing_variable), self.num_latent_gps)))
self.V.prior = tfp.distributions.Normal(
loc=to_default_float(0.0), scale=to_default_float(1.0)
)
|
https://github.com/GPflow/GPflow/issues/1578
|
TypeError Traceback (most recent call last)
<ipython-input-24-9a082736eedc> in <module>
38
39
---> 40 optimization_step()
41
42
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
821 # This is the first call of __call__, so we have to initialize.
822 initializers = []
--> 823 self._initialize(args, kwds, add_initializers_to=initializers)
824 finally:
825 # At this point we know that the initialization is complete (or less
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
694 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
695 self._concrete_stateful_fn = (
--> 696 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
697 *args, **kwds))
698
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2853 args, kwargs = None, None
2854 with self._lock:
-> 2855 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2856 return graph_function
2857
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
3211
3212 self._function_cache.missed.add(call_context_key)
-> 3213 graph_function = self._create_graph_function(args, kwargs)
3214 self._function_cache.primary[cache_key] = graph_function
3215 return graph_function, args, kwargs
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3063 arg_names = base_arg_names + missing_arg_names
3064 graph_function = ConcreteFunction(
-> 3065 func_graph_module.func_graph_from_py_func(
3066 self._name,
3067 self._python_function,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
984 _, original_func = tf_decorator.unwrap(python_func)
985
--> 986 func_outputs = python_func(*func_args, **func_kwargs)
987
988 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
598 # __wrapped__ allows AutoGraph to swap in a converted function. We give
599 # the function a weak reference to itself to avoid a reference cycle.
--> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds)
601 weak_wrapped_fn = weakref.ref(wrapped_fn)
602
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
TypeError: in user code:
<ipython-input-24-9a082736eedc>:32 optimization_step *
optimizer.minimize(m.training_loss, m.trainable_variables)
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:374 minimize **
grads_and_vars = self._compute_gradients(
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:429 _compute_gradients
loss_value = loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/training_mixins.py:64 training_loss
return self._training_loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/model.py:57 _training_loss
return -(self.maximum_log_likelihood_objective(*args, **kwargs) + self.log_prior_density())
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:154 maximum_log_likelihood_objective
return self.elbo()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:164 elbo
num_inducing = len(self.inducing_variable)
TypeError: 'Tensor' object cannot be interpreted as an integer
|
TypeError
|
def upper_bound(self) -> tf.Tensor:
"""
Upper bound for the sparse GP regression marginal likelihood. Note that
the same inducing points are used for calculating the upper bound, as are
used for computing the likelihood approximation. This may not lead to the
best upper bound. The upper bound can be tightened by optimising Z, just
like the lower bound. This is especially important in FITC, as FITC is
known to produce poor inducing point locations. An optimisable upper bound
can be found in https://github.com/markvdw/gp_upper.
The key reference is
::
@misc{titsias_2014,
title={Variational Inference for Gaussian and Determinantal Point Processes},
url={http://www2.aueb.gr/users/mtitsias/papers/titsiasNipsVar14.pdf},
publisher={Workshop on Advances in Variational Inference (NIPS 2014)},
author={Titsias, Michalis K.},
year={2014},
month={Dec}
}
The key quantity, the trace term, can be computed via
>>> _, v = conditionals.conditional(X, model.inducing_variable.Z, model.kernel,
... np.zeros((model.inducing_variable.num_inducing, 1)))
which computes each individual element of the trace term.
"""
X_data, Y_data = self.data
num_data = to_default_float(tf.shape(Y_data)[0])
Kdiag = self.kernel(X_data, full_cov=False)
kuu = Kuu(self.inducing_variable, self.kernel, jitter=default_jitter())
kuf = Kuf(self.inducing_variable, self.kernel, X_data)
I = tf.eye(tf.shape(kuu)[0], dtype=default_float())
L = tf.linalg.cholesky(kuu)
A = tf.linalg.triangular_solve(L, kuf, lower=True)
AAT = tf.linalg.matmul(A, A, transpose_b=True)
B = I + AAT / self.likelihood.variance
LB = tf.linalg.cholesky(B)
# Using the Trace bound, from Titsias' presentation
c = tf.reduce_sum(Kdiag) - tf.reduce_sum(tf.square(A))
# Alternative bound on max eigenval:
corrected_noise = self.likelihood.variance + c
const = -0.5 * num_data * tf.math.log(2 * np.pi * self.likelihood.variance)
logdet = -tf.reduce_sum(tf.math.log(tf.linalg.diag_part(LB)))
err = Y_data - self.mean_function(X_data)
LC = tf.linalg.cholesky(I + AAT / corrected_noise)
v = tf.linalg.triangular_solve(
LC, tf.linalg.matmul(A, err) / corrected_noise, lower=True
)
quad = -0.5 * tf.reduce_sum(tf.square(err)) / corrected_noise + 0.5 * tf.reduce_sum(
tf.square(v)
)
return const + logdet + quad
|
def upper_bound(self) -> tf.Tensor:
"""
Upper bound for the sparse GP regression marginal likelihood. Note that
the same inducing points are used for calculating the upper bound, as are
used for computing the likelihood approximation. This may not lead to the
best upper bound. The upper bound can be tightened by optimising Z, just
like the lower bound. This is especially important in FITC, as FITC is
known to produce poor inducing point locations. An optimisable upper bound
can be found in https://github.com/markvdw/gp_upper.
The key reference is
::
@misc{titsias_2014,
title={Variational Inference for Gaussian and Determinantal Point Processes},
url={http://www2.aueb.gr/users/mtitsias/papers/titsiasNipsVar14.pdf},
publisher={Workshop on Advances in Variational Inference (NIPS 2014)},
author={Titsias, Michalis K.},
year={2014},
month={Dec}
}
The key quantity, the trace term, can be computed via
>>> _, v = conditionals.conditional(X, model.inducing_variable.Z, model.kernel,
... np.zeros((len(model.inducing_variable), 1)))
which computes each individual element of the trace term.
"""
X_data, Y_data = self.data
num_data = to_default_float(tf.shape(Y_data)[0])
Kdiag = self.kernel(X_data, full_cov=False)
kuu = Kuu(self.inducing_variable, self.kernel, jitter=default_jitter())
kuf = Kuf(self.inducing_variable, self.kernel, X_data)
I = tf.eye(tf.shape(kuu)[0], dtype=default_float())
L = tf.linalg.cholesky(kuu)
A = tf.linalg.triangular_solve(L, kuf, lower=True)
AAT = tf.linalg.matmul(A, A, transpose_b=True)
B = I + AAT / self.likelihood.variance
LB = tf.linalg.cholesky(B)
# Using the Trace bound, from Titsias' presentation
c = tf.reduce_sum(Kdiag) - tf.reduce_sum(tf.square(A))
# Alternative bound on max eigenval:
corrected_noise = self.likelihood.variance + c
const = -0.5 * num_data * tf.math.log(2 * np.pi * self.likelihood.variance)
logdet = -tf.reduce_sum(tf.math.log(tf.linalg.diag_part(LB)))
err = Y_data - self.mean_function(X_data)
LC = tf.linalg.cholesky(I + AAT / corrected_noise)
v = tf.linalg.triangular_solve(
LC, tf.linalg.matmul(A, err) / corrected_noise, lower=True
)
quad = -0.5 * tf.reduce_sum(tf.square(err)) / corrected_noise + 0.5 * tf.reduce_sum(
tf.square(v)
)
return const + logdet + quad
|
https://github.com/GPflow/GPflow/issues/1578
|
TypeError Traceback (most recent call last)
<ipython-input-24-9a082736eedc> in <module>
38
39
---> 40 optimization_step()
41
42
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
821 # This is the first call of __call__, so we have to initialize.
822 initializers = []
--> 823 self._initialize(args, kwds, add_initializers_to=initializers)
824 finally:
825 # At this point we know that the initialization is complete (or less
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
694 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
695 self._concrete_stateful_fn = (
--> 696 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
697 *args, **kwds))
698
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2853 args, kwargs = None, None
2854 with self._lock:
-> 2855 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2856 return graph_function
2857
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
3211
3212 self._function_cache.missed.add(call_context_key)
-> 3213 graph_function = self._create_graph_function(args, kwargs)
3214 self._function_cache.primary[cache_key] = graph_function
3215 return graph_function, args, kwargs
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3063 arg_names = base_arg_names + missing_arg_names
3064 graph_function = ConcreteFunction(
-> 3065 func_graph_module.func_graph_from_py_func(
3066 self._name,
3067 self._python_function,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
984 _, original_func = tf_decorator.unwrap(python_func)
985
--> 986 func_outputs = python_func(*func_args, **func_kwargs)
987
988 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
598 # __wrapped__ allows AutoGraph to swap in a converted function. We give
599 # the function a weak reference to itself to avoid a reference cycle.
--> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds)
601 weak_wrapped_fn = weakref.ref(wrapped_fn)
602
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
TypeError: in user code:
<ipython-input-24-9a082736eedc>:32 optimization_step *
optimizer.minimize(m.training_loss, m.trainable_variables)
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:374 minimize **
grads_and_vars = self._compute_gradients(
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:429 _compute_gradients
loss_value = loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/training_mixins.py:64 training_loss
return self._training_loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/model.py:57 _training_loss
return -(self.maximum_log_likelihood_objective(*args, **kwargs) + self.log_prior_density())
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:154 maximum_log_likelihood_objective
return self.elbo()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:164 elbo
num_inducing = len(self.inducing_variable)
TypeError: 'Tensor' object cannot be interpreted as an integer
|
TypeError
|
def elbo(self) -> tf.Tensor:
"""
Construct a tensorflow function to compute the bound on the marginal
likelihood. For a derivation of the terms in here, see the associated
SGPR notebook.
"""
X_data, Y_data = self.data
num_inducing = self.inducing_variable.num_inducing
num_data = to_default_float(tf.shape(Y_data)[0])
output_dim = to_default_float(tf.shape(Y_data)[1])
err = Y_data - self.mean_function(X_data)
Kdiag = self.kernel(X_data, full_cov=False)
kuf = Kuf(self.inducing_variable, self.kernel, X_data)
kuu = Kuu(self.inducing_variable, self.kernel, jitter=default_jitter())
L = tf.linalg.cholesky(kuu)
sigma = tf.sqrt(self.likelihood.variance)
# Compute intermediate matrices
A = tf.linalg.triangular_solve(L, kuf, lower=True) / sigma
AAT = tf.linalg.matmul(A, A, transpose_b=True)
B = AAT + tf.eye(num_inducing, dtype=default_float())
LB = tf.linalg.cholesky(B)
Aerr = tf.linalg.matmul(A, err)
c = tf.linalg.triangular_solve(LB, Aerr, lower=True) / sigma
# compute log marginal bound
bound = -0.5 * num_data * output_dim * np.log(2 * np.pi)
bound += tf.negative(output_dim) * tf.reduce_sum(
tf.math.log(tf.linalg.diag_part(LB))
)
bound -= 0.5 * num_data * output_dim * tf.math.log(self.likelihood.variance)
bound += -0.5 * tf.reduce_sum(tf.square(err)) / self.likelihood.variance
bound += 0.5 * tf.reduce_sum(tf.square(c))
bound += -0.5 * output_dim * tf.reduce_sum(Kdiag) / self.likelihood.variance
bound += 0.5 * output_dim * tf.reduce_sum(tf.linalg.diag_part(AAT))
return bound
|
def elbo(self) -> tf.Tensor:
"""
Construct a tensorflow function to compute the bound on the marginal
likelihood. For a derivation of the terms in here, see the associated
SGPR notebook.
"""
X_data, Y_data = self.data
num_inducing = len(self.inducing_variable)
num_data = to_default_float(tf.shape(Y_data)[0])
output_dim = to_default_float(tf.shape(Y_data)[1])
err = Y_data - self.mean_function(X_data)
Kdiag = self.kernel(X_data, full_cov=False)
kuf = Kuf(self.inducing_variable, self.kernel, X_data)
kuu = Kuu(self.inducing_variable, self.kernel, jitter=default_jitter())
L = tf.linalg.cholesky(kuu)
sigma = tf.sqrt(self.likelihood.variance)
# Compute intermediate matrices
A = tf.linalg.triangular_solve(L, kuf, lower=True) / sigma
AAT = tf.linalg.matmul(A, A, transpose_b=True)
B = AAT + tf.eye(num_inducing, dtype=default_float())
LB = tf.linalg.cholesky(B)
Aerr = tf.linalg.matmul(A, err)
c = tf.linalg.triangular_solve(LB, Aerr, lower=True) / sigma
# compute log marginal bound
bound = -0.5 * num_data * output_dim * np.log(2 * np.pi)
bound += tf.negative(output_dim) * tf.reduce_sum(
tf.math.log(tf.linalg.diag_part(LB))
)
bound -= 0.5 * num_data * output_dim * tf.math.log(self.likelihood.variance)
bound += -0.5 * tf.reduce_sum(tf.square(err)) / self.likelihood.variance
bound += 0.5 * tf.reduce_sum(tf.square(c))
bound += -0.5 * output_dim * tf.reduce_sum(Kdiag) / self.likelihood.variance
bound += 0.5 * output_dim * tf.reduce_sum(tf.linalg.diag_part(AAT))
return bound
|
https://github.com/GPflow/GPflow/issues/1578
|
TypeError Traceback (most recent call last)
<ipython-input-24-9a082736eedc> in <module>
38
39
---> 40 optimization_step()
41
42
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
821 # This is the first call of __call__, so we have to initialize.
822 initializers = []
--> 823 self._initialize(args, kwds, add_initializers_to=initializers)
824 finally:
825 # At this point we know that the initialization is complete (or less
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
694 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
695 self._concrete_stateful_fn = (
--> 696 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
697 *args, **kwds))
698
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2853 args, kwargs = None, None
2854 with self._lock:
-> 2855 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2856 return graph_function
2857
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
3211
3212 self._function_cache.missed.add(call_context_key)
-> 3213 graph_function = self._create_graph_function(args, kwargs)
3214 self._function_cache.primary[cache_key] = graph_function
3215 return graph_function, args, kwargs
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3063 arg_names = base_arg_names + missing_arg_names
3064 graph_function = ConcreteFunction(
-> 3065 func_graph_module.func_graph_from_py_func(
3066 self._name,
3067 self._python_function,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
984 _, original_func = tf_decorator.unwrap(python_func)
985
--> 986 func_outputs = python_func(*func_args, **func_kwargs)
987
988 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
598 # __wrapped__ allows AutoGraph to swap in a converted function. We give
599 # the function a weak reference to itself to avoid a reference cycle.
--> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds)
601 weak_wrapped_fn = weakref.ref(wrapped_fn)
602
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
TypeError: in user code:
<ipython-input-24-9a082736eedc>:32 optimization_step *
optimizer.minimize(m.training_loss, m.trainable_variables)
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:374 minimize **
grads_and_vars = self._compute_gradients(
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:429 _compute_gradients
loss_value = loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/training_mixins.py:64 training_loss
return self._training_loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/model.py:57 _training_loss
return -(self.maximum_log_likelihood_objective(*args, **kwargs) + self.log_prior_density())
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:154 maximum_log_likelihood_objective
return self.elbo()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:164 elbo
num_inducing = len(self.inducing_variable)
TypeError: 'Tensor' object cannot be interpreted as an integer
|
TypeError
|
def predict_f(
self, Xnew: InputData, full_cov=False, full_output_cov=False
) -> MeanAndVariance:
"""
Compute the mean and variance of the latent function at some new points
Xnew. For a derivation of the terms in here, see the associated SGPR
notebook.
"""
X_data, Y_data = self.data
num_inducing = self.inducing_variable.num_inducing
err = Y_data - self.mean_function(X_data)
kuf = Kuf(self.inducing_variable, self.kernel, X_data)
kuu = Kuu(self.inducing_variable, self.kernel, jitter=default_jitter())
Kus = Kuf(self.inducing_variable, self.kernel, Xnew)
sigma = tf.sqrt(self.likelihood.variance)
L = tf.linalg.cholesky(kuu)
A = tf.linalg.triangular_solve(L, kuf, lower=True) / sigma
B = tf.linalg.matmul(A, A, transpose_b=True) + tf.eye(
num_inducing, dtype=default_float()
)
LB = tf.linalg.cholesky(B)
Aerr = tf.linalg.matmul(A, err)
c = tf.linalg.triangular_solve(LB, Aerr, lower=True) / sigma
tmp1 = tf.linalg.triangular_solve(L, Kus, lower=True)
tmp2 = tf.linalg.triangular_solve(LB, tmp1, lower=True)
mean = tf.linalg.matmul(tmp2, c, transpose_a=True)
if full_cov:
var = (
self.kernel(Xnew)
+ tf.linalg.matmul(tmp2, tmp2, transpose_a=True)
- tf.linalg.matmul(tmp1, tmp1, transpose_a=True)
)
var = tf.tile(var[None, ...], [self.num_latent_gps, 1, 1]) # [P, N, N]
else:
var = (
self.kernel(Xnew, full_cov=False)
+ tf.reduce_sum(tf.square(tmp2), 0)
- tf.reduce_sum(tf.square(tmp1), 0)
)
var = tf.tile(var[:, None], [1, self.num_latent_gps])
return mean + self.mean_function(Xnew), var
|
def predict_f(
self, Xnew: InputData, full_cov=False, full_output_cov=False
) -> MeanAndVariance:
"""
Compute the mean and variance of the latent function at some new points
Xnew. For a derivation of the terms in here, see the associated SGPR
notebook.
"""
X_data, Y_data = self.data
num_inducing = len(self.inducing_variable)
err = Y_data - self.mean_function(X_data)
kuf = Kuf(self.inducing_variable, self.kernel, X_data)
kuu = Kuu(self.inducing_variable, self.kernel, jitter=default_jitter())
Kus = Kuf(self.inducing_variable, self.kernel, Xnew)
sigma = tf.sqrt(self.likelihood.variance)
L = tf.linalg.cholesky(kuu)
A = tf.linalg.triangular_solve(L, kuf, lower=True) / sigma
B = tf.linalg.matmul(A, A, transpose_b=True) + tf.eye(
num_inducing, dtype=default_float()
)
LB = tf.linalg.cholesky(B)
Aerr = tf.linalg.matmul(A, err)
c = tf.linalg.triangular_solve(LB, Aerr, lower=True) / sigma
tmp1 = tf.linalg.triangular_solve(L, Kus, lower=True)
tmp2 = tf.linalg.triangular_solve(LB, tmp1, lower=True)
mean = tf.linalg.matmul(tmp2, c, transpose_a=True)
if full_cov:
var = (
self.kernel(Xnew)
+ tf.linalg.matmul(tmp2, tmp2, transpose_a=True)
- tf.linalg.matmul(tmp1, tmp1, transpose_a=True)
)
var = tf.tile(var[None, ...], [self.num_latent_gps, 1, 1]) # [P, N, N]
else:
var = (
self.kernel(Xnew, full_cov=False)
+ tf.reduce_sum(tf.square(tmp2), 0)
- tf.reduce_sum(tf.square(tmp1), 0)
)
var = tf.tile(var[:, None], [1, self.num_latent_gps])
return mean + self.mean_function(Xnew), var
|
https://github.com/GPflow/GPflow/issues/1578
|
TypeError Traceback (most recent call last)
<ipython-input-24-9a082736eedc> in <module>
38
39
---> 40 optimization_step()
41
42
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
821 # This is the first call of __call__, so we have to initialize.
822 initializers = []
--> 823 self._initialize(args, kwds, add_initializers_to=initializers)
824 finally:
825 # At this point we know that the initialization is complete (or less
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
694 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
695 self._concrete_stateful_fn = (
--> 696 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
697 *args, **kwds))
698
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2853 args, kwargs = None, None
2854 with self._lock:
-> 2855 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2856 return graph_function
2857
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
3211
3212 self._function_cache.missed.add(call_context_key)
-> 3213 graph_function = self._create_graph_function(args, kwargs)
3214 self._function_cache.primary[cache_key] = graph_function
3215 return graph_function, args, kwargs
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3063 arg_names = base_arg_names + missing_arg_names
3064 graph_function = ConcreteFunction(
-> 3065 func_graph_module.func_graph_from_py_func(
3066 self._name,
3067 self._python_function,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
984 _, original_func = tf_decorator.unwrap(python_func)
985
--> 986 func_outputs = python_func(*func_args, **func_kwargs)
987
988 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
598 # __wrapped__ allows AutoGraph to swap in a converted function. We give
599 # the function a weak reference to itself to avoid a reference cycle.
--> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds)
601 weak_wrapped_fn = weakref.ref(wrapped_fn)
602
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
TypeError: in user code:
<ipython-input-24-9a082736eedc>:32 optimization_step *
optimizer.minimize(m.training_loss, m.trainable_variables)
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:374 minimize **
grads_and_vars = self._compute_gradients(
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:429 _compute_gradients
loss_value = loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/training_mixins.py:64 training_loss
return self._training_loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/model.py:57 _training_loss
return -(self.maximum_log_likelihood_objective(*args, **kwargs) + self.log_prior_density())
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:154 maximum_log_likelihood_objective
return self.elbo()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:164 elbo
num_inducing = len(self.inducing_variable)
TypeError: 'Tensor' object cannot be interpreted as an integer
|
TypeError
|
def common_terms(self):
X_data, Y_data = self.data
num_inducing = self.inducing_variable.num_inducing
err = Y_data - self.mean_function(X_data) # size [N, R]
Kdiag = self.kernel(X_data, full_cov=False)
kuf = Kuf(self.inducing_variable, self.kernel, X_data)
kuu = Kuu(self.inducing_variable, self.kernel, jitter=default_jitter())
Luu = tf.linalg.cholesky(kuu) # => Luu Luu^T = kuu
V = tf.linalg.triangular_solve(Luu, kuf) # => V^T V = Qff = kuf^T kuu^-1 kuf
diagQff = tf.reduce_sum(tf.square(V), 0)
nu = Kdiag - diagQff + self.likelihood.variance
B = tf.eye(num_inducing, dtype=default_float()) + tf.linalg.matmul(
V / nu, V, transpose_b=True
)
L = tf.linalg.cholesky(B)
beta = err / tf.expand_dims(nu, 1) # size [N, R]
alpha = tf.linalg.matmul(V, beta) # size [N, R]
gamma = tf.linalg.triangular_solve(L, alpha, lower=True) # size [N, R]
return err, nu, Luu, L, alpha, beta, gamma
|
def common_terms(self):
X_data, Y_data = self.data
num_inducing = len(self.inducing_variable)
err = Y_data - self.mean_function(X_data) # size [N, R]
Kdiag = self.kernel(X_data, full_cov=False)
kuf = Kuf(self.inducing_variable, self.kernel, X_data)
kuu = Kuu(self.inducing_variable, self.kernel, jitter=default_jitter())
Luu = tf.linalg.cholesky(kuu) # => Luu Luu^T = kuu
V = tf.linalg.triangular_solve(Luu, kuf) # => V^T V = Qff = kuf^T kuu^-1 kuf
diagQff = tf.reduce_sum(tf.square(V), 0)
nu = Kdiag - diagQff + self.likelihood.variance
B = tf.eye(num_inducing, dtype=default_float()) + tf.linalg.matmul(
V / nu, V, transpose_b=True
)
L = tf.linalg.cholesky(B)
beta = err / tf.expand_dims(nu, 1) # size [N, R]
alpha = tf.linalg.matmul(V, beta) # size [N, R]
gamma = tf.linalg.triangular_solve(L, alpha, lower=True) # size [N, R]
return err, nu, Luu, L, alpha, beta, gamma
|
https://github.com/GPflow/GPflow/issues/1578
|
TypeError Traceback (most recent call last)
<ipython-input-24-9a082736eedc> in <module>
38
39
---> 40 optimization_step()
41
42
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
821 # This is the first call of __call__, so we have to initialize.
822 initializers = []
--> 823 self._initialize(args, kwds, add_initializers_to=initializers)
824 finally:
825 # At this point we know that the initialization is complete (or less
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
694 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
695 self._concrete_stateful_fn = (
--> 696 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
697 *args, **kwds))
698
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2853 args, kwargs = None, None
2854 with self._lock:
-> 2855 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2856 return graph_function
2857
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
3211
3212 self._function_cache.missed.add(call_context_key)
-> 3213 graph_function = self._create_graph_function(args, kwargs)
3214 self._function_cache.primary[cache_key] = graph_function
3215 return graph_function, args, kwargs
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3063 arg_names = base_arg_names + missing_arg_names
3064 graph_function = ConcreteFunction(
-> 3065 func_graph_module.func_graph_from_py_func(
3066 self._name,
3067 self._python_function,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
984 _, original_func = tf_decorator.unwrap(python_func)
985
--> 986 func_outputs = python_func(*func_args, **func_kwargs)
987
988 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
598 # __wrapped__ allows AutoGraph to swap in a converted function. We give
599 # the function a weak reference to itself to avoid a reference cycle.
--> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds)
601 weak_wrapped_fn = weakref.ref(wrapped_fn)
602
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
TypeError: in user code:
<ipython-input-24-9a082736eedc>:32 optimization_step *
optimizer.minimize(m.training_loss, m.trainable_variables)
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:374 minimize **
grads_and_vars = self._compute_gradients(
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:429 _compute_gradients
loss_value = loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/training_mixins.py:64 training_loss
return self._training_loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/model.py:57 _training_loss
return -(self.maximum_log_likelihood_objective(*args, **kwargs) + self.log_prior_density())
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:154 maximum_log_likelihood_objective
return self.elbo()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:164 elbo
num_inducing = len(self.inducing_variable)
TypeError: 'Tensor' object cannot be interpreted as an integer
|
TypeError
|
def __init__(
self,
kernel,
likelihood,
inducing_variable,
*,
mean_function=None,
num_latent_gps: int = 1,
q_diag: bool = False,
q_mu=None,
q_sqrt=None,
whiten: bool = True,
num_data=None,
):
"""
- kernel, likelihood, inducing_variables, mean_function are appropriate
GPflow objects
- num_latent_gps is the number of latent processes to use, defaults to 1
- q_diag is a boolean. If True, the covariance is approximated by a
diagonal matrix.
- whiten is a boolean. If True, we use the whitened representation of
the inducing points.
- num_data is the total number of observations, defaults to X.shape[0]
(relevant when feeding in external minibatches)
"""
# init the super class, accept args
super().__init__(kernel, likelihood, mean_function, num_latent_gps)
self.num_data = num_data
self.q_diag = q_diag
self.whiten = whiten
self.inducing_variable = inducingpoint_wrapper(inducing_variable)
# init variational parameters
num_inducing = self.inducing_variable.num_inducing
self._init_variational_parameters(num_inducing, q_mu, q_sqrt, q_diag)
|
def __init__(
self,
kernel,
likelihood,
inducing_variable,
*,
mean_function=None,
num_latent_gps: int = 1,
q_diag: bool = False,
q_mu=None,
q_sqrt=None,
whiten: bool = True,
num_data=None,
):
"""
- kernel, likelihood, inducing_variables, mean_function are appropriate
GPflow objects
- num_latent_gps is the number of latent processes to use, defaults to 1
- q_diag is a boolean. If True, the covariance is approximated by a
diagonal matrix.
- whiten is a boolean. If True, we use the whitened representation of
the inducing points.
- num_data is the total number of observations, defaults to X.shape[0]
(relevant when feeding in external minibatches)
"""
# init the super class, accept args
super().__init__(kernel, likelihood, mean_function, num_latent_gps)
self.num_data = num_data
self.q_diag = q_diag
self.whiten = whiten
self.inducing_variable = inducingpoint_wrapper(inducing_variable)
# init variational parameters
num_inducing = len(self.inducing_variable)
self._init_variational_parameters(num_inducing, q_mu, q_sqrt, q_diag)
|
https://github.com/GPflow/GPflow/issues/1578
|
TypeError Traceback (most recent call last)
<ipython-input-24-9a082736eedc> in <module>
38
39
---> 40 optimization_step()
41
42
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
821 # This is the first call of __call__, so we have to initialize.
822 initializers = []
--> 823 self._initialize(args, kwds, add_initializers_to=initializers)
824 finally:
825 # At this point we know that the initialization is complete (or less
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
694 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
695 self._concrete_stateful_fn = (
--> 696 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
697 *args, **kwds))
698
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2853 args, kwargs = None, None
2854 with self._lock:
-> 2855 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2856 return graph_function
2857
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
3211
3212 self._function_cache.missed.add(call_context_key)
-> 3213 graph_function = self._create_graph_function(args, kwargs)
3214 self._function_cache.primary[cache_key] = graph_function
3215 return graph_function, args, kwargs
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3063 arg_names = base_arg_names + missing_arg_names
3064 graph_function = ConcreteFunction(
-> 3065 func_graph_module.func_graph_from_py_func(
3066 self._name,
3067 self._python_function,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
984 _, original_func = tf_decorator.unwrap(python_func)
985
--> 986 func_outputs = python_func(*func_args, **func_kwargs)
987
988 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
598 # __wrapped__ allows AutoGraph to swap in a converted function. We give
599 # the function a weak reference to itself to avoid a reference cycle.
--> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds)
601 weak_wrapped_fn = weakref.ref(wrapped_fn)
602
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
TypeError: in user code:
<ipython-input-24-9a082736eedc>:32 optimization_step *
optimizer.minimize(m.training_loss, m.trainable_variables)
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:374 minimize **
grads_and_vars = self._compute_gradients(
/home/maltamirano/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:429 _compute_gradients
loss_value = loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/training_mixins.py:64 training_loss
return self._training_loss()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/model.py:57 _training_loss
return -(self.maximum_log_likelihood_objective(*args, **kwargs) + self.log_prior_density())
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:154 maximum_log_likelihood_objective
return self.elbo()
/home/maltamirano/anaconda3/lib/python3.8/site-packages/gpflow/models/sgpr.py:164 elbo
num_inducing = len(self.inducing_variable)
TypeError: 'Tensor' object cannot be interpreted as an integer
|
TypeError
|
def ndiagquad(funcs, H: int, Fmu, Fvar, logspace: bool = False, **Ys):
"""
Computes N Gaussian expectation integrals of one or more functions
using Gauss-Hermite quadrature. The Gaussians must be independent.
The means and variances of the Gaussians are specified by Fmu and Fvar.
The N-integrals are assumed to be taken wrt the last dimensions of Fmu, Fvar.
:param funcs: the integrand(s):
Callable or Iterable of Callables that operates elementwise
:param H: number of Gauss-Hermite quadrature points
:param Fmu: array/tensor or `Din`-tuple/list thereof
:param Fvar: array/tensor or `Din`-tuple/list thereof
:param logspace: if True, funcs are the log-integrands and this calculates
the log-expectation of exp(funcs)
:param **Ys: arrays/tensors; deterministic arguments to be passed by name
Fmu, Fvar, Ys should all have same shape, with overall size `N`
:return: shape is the same as that of the first Fmu
"""
n_gh = H
if isinstance(Fmu, (tuple, list)):
dim = len(Fmu)
shape = tf.shape(Fmu[0])
Fmu = tf.stack(Fmu, axis=-1)
Fvar = tf.stack(Fvar, axis=-1)
else:
dim = 1
shape = tf.shape(Fmu)
Fmu = tf.reshape(Fmu, (-1, dim))
Fvar = tf.reshape(Fvar, (-1, dim))
Ys = {Yname: tf.reshape(Y, (-1, 1)) for Yname, Y in Ys.items()}
def wrapper(old_fun):
def new_fun(X, **Ys):
Xs = tf.unstack(X, axis=-1)
fun_eval = old_fun(*Xs, **Ys)
return tf.cond(
pred=tf.less(tf.rank(fun_eval), tf.rank(X)),
true_fn=lambda: fun_eval[..., tf.newaxis],
false_fn=lambda: fun_eval,
)
return new_fun
if isinstance(funcs, Iterable):
funcs = [wrapper(f) for f in funcs]
else:
funcs = wrapper(funcs)
quadrature = NDiagGHQuadrature(dim, n_gh)
if logspace:
result = quadrature.logspace(funcs, Fmu, Fvar, **Ys)
else:
result = quadrature(funcs, Fmu, Fvar, **Ys)
if isinstance(result, list):
result = [tf.reshape(r, shape) for r in result]
else:
result = tf.reshape(result, shape)
return result
|
def ndiagquad(funcs, H: int, Fmu, Fvar, logspace: bool = False, **Ys):
"""
Computes N Gaussian expectation integrals of one or more functions
using Gauss-Hermite quadrature. The Gaussians must be independent.
The means and variances of the Gaussians are specified by Fmu and Fvar.
The N-integrals are assumed to be taken wrt the last dimensions of Fmu, Fvar.
:param funcs: the integrand(s):
Callable or Iterable of Callables that operates elementwise
:param H: number of Gauss-Hermite quadrature points
:param Fmu: array/tensor or `Din`-tuple/list thereof
:param Fvar: array/tensor or `Din`-tuple/list thereof
:param logspace: if True, funcs are the log-integrands and this calculates
the log-expectation of exp(funcs)
:param **Ys: arrays/tensors; deterministic arguments to be passed by name
Fmu, Fvar, Ys should all have same shape, with overall size `N`
:return: shape is the same as that of the first Fmu
"""
n_gh = H
if isinstance(Fmu, (tuple, list)):
dim = len(Fmu)
shape = tf.shape(Fmu[0])
Fmu = tf.stack(Fmu, axis=-1)
Fvar = tf.stack(Fvar, axis=-1)
else:
dim = 1
shape = tf.shape(Fmu)
Fmu = tf.reshape(Fmu, (-1, dim))
Fvar = tf.reshape(Fvar, (-1, dim))
Ys = {Yname: tf.reshape(Y, (-1, 1)) for Yname, Y in Ys.items()}
def wrapper(old_fun):
def new_fun(X, **Ys):
Xs = tf.unstack(X, axis=-1)
fun_eval = old_fun(*Xs, **Ys)
if tf.rank(fun_eval) < tf.rank(X):
fun_eval = tf.expand_dims(fun_eval, axis=-1)
return fun_eval
return new_fun
if isinstance(funcs, Iterable):
funcs = [wrapper(f) for f in funcs]
else:
funcs = wrapper(funcs)
quadrature = NDiagGHQuadrature(dim, n_gh)
if logspace:
result = quadrature.logspace(funcs, Fmu, Fvar, **Ys)
else:
result = quadrature(funcs, Fmu, Fvar, **Ys)
if isinstance(result, list):
result = [tf.reshape(r, shape) for r in result]
else:
result = tf.reshape(result, shape)
return result
|
https://github.com/GPflow/GPflow/issues/1547
|
Traceback (most recent call last):
File "gpflow_error.py", line 20, in <module>
go()
File "gpflow_error.py", line 16, in go
quad = compute()
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py", line 580, in __call__
result = self._call(*args, **kwds)
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py", line 627, in _call
self._initialize(args, kwds, add_initializers_to=initializers)
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py", line 506, in _initialize
*args, **kwds))
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/eager/function.py", line 2446, in _get_concrete_function_internal_garbage_collected
graph_function, _, _ = self._maybe_define_function(args, kwargs)
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/eager/function.py", line 2777, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/eager/function.py", line 2667, in _create_graph_function
capture_by_value=self._capture_by_value),
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/framework/func_graph.py", line 981, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py", line 441, in wrapped_fn
return weak_wrapped_fn().__wrapped__(*args, **kwds)
File "gpflow_error.py", line 11, in compute
quad = quadrature.ndiagquad([lambda *X: tf.exp(X[0])], num_gauss_hermite_points, [mu], [var])
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/gpflow/quadrature/deprecated.py", line 156, in ndiagquad
result = quadrature(funcs, Fmu, Fvar, **Ys)
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/gpflow/quadrature/base.py", line 52, in __call__
return [tf.reduce_sum(f(X, *args, **kwargs) * W, axis=-2) for f in fun]
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/gpflow/quadrature/base.py", line 52, in <listcomp>
return [tf.reduce_sum(f(X, *args, **kwargs) * W, axis=-2) for f in fun]
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/gpflow/quadrature/deprecated.py", line 141, in new_fun
if tf.rank(fun_eval) < tf.rank(X):
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/framework/ops.py", line 778, in __bool__
self._disallow_bool_casting()
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/framework/ops.py", line 542, in _disallow_bool_casting
"using a `tf.Tensor` as a Python `bool`")
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/framework/ops.py", line 527, in _disallow_when_autograph_disabled
" Try decorating it directly with @tf.function.".format(task))
tensorflow.python.framework.errors_impl.OperatorNotAllowedInGraphError: using a `tf.Tensor` as a Python `bool` is not allowed: AutoGraph is disabled in this function. Try decorating it directly with @tf.function.
|
tensorflow.python.framework.errors_impl.OperatorNotAllowedInGraphError
|
def wrapper(old_fun):
def new_fun(X, **Ys):
Xs = tf.unstack(X, axis=-1)
fun_eval = old_fun(*Xs, **Ys)
return tf.cond(
pred=tf.less(tf.rank(fun_eval), tf.rank(X)),
true_fn=lambda: fun_eval[..., tf.newaxis],
false_fn=lambda: fun_eval,
)
return new_fun
|
def wrapper(old_fun):
def new_fun(X, **Ys):
Xs = tf.unstack(X, axis=-1)
fun_eval = old_fun(*Xs, **Ys)
if tf.rank(fun_eval) < tf.rank(X):
fun_eval = tf.expand_dims(fun_eval, axis=-1)
return fun_eval
return new_fun
|
https://github.com/GPflow/GPflow/issues/1547
|
Traceback (most recent call last):
File "gpflow_error.py", line 20, in <module>
go()
File "gpflow_error.py", line 16, in go
quad = compute()
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py", line 580, in __call__
result = self._call(*args, **kwds)
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py", line 627, in _call
self._initialize(args, kwds, add_initializers_to=initializers)
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py", line 506, in _initialize
*args, **kwds))
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/eager/function.py", line 2446, in _get_concrete_function_internal_garbage_collected
graph_function, _, _ = self._maybe_define_function(args, kwargs)
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/eager/function.py", line 2777, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/eager/function.py", line 2667, in _create_graph_function
capture_by_value=self._capture_by_value),
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/framework/func_graph.py", line 981, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py", line 441, in wrapped_fn
return weak_wrapped_fn().__wrapped__(*args, **kwds)
File "gpflow_error.py", line 11, in compute
quad = quadrature.ndiagquad([lambda *X: tf.exp(X[0])], num_gauss_hermite_points, [mu], [var])
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/gpflow/quadrature/deprecated.py", line 156, in ndiagquad
result = quadrature(funcs, Fmu, Fvar, **Ys)
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/gpflow/quadrature/base.py", line 52, in __call__
return [tf.reduce_sum(f(X, *args, **kwargs) * W, axis=-2) for f in fun]
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/gpflow/quadrature/base.py", line 52, in <listcomp>
return [tf.reduce_sum(f(X, *args, **kwargs) * W, axis=-2) for f in fun]
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/gpflow/quadrature/deprecated.py", line 141, in new_fun
if tf.rank(fun_eval) < tf.rank(X):
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/framework/ops.py", line 778, in __bool__
self._disallow_bool_casting()
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/framework/ops.py", line 542, in _disallow_bool_casting
"using a `tf.Tensor` as a Python `bool`")
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/framework/ops.py", line 527, in _disallow_when_autograph_disabled
" Try decorating it directly with @tf.function.".format(task))
tensorflow.python.framework.errors_impl.OperatorNotAllowedInGraphError: using a `tf.Tensor` as a Python `bool` is not allowed: AutoGraph is disabled in this function. Try decorating it directly with @tf.function.
|
tensorflow.python.framework.errors_impl.OperatorNotAllowedInGraphError
|
def new_fun(X, **Ys):
Xs = tf.unstack(X, axis=-1)
fun_eval = old_fun(*Xs, **Ys)
return tf.cond(
pred=tf.less(tf.rank(fun_eval), tf.rank(X)),
true_fn=lambda: fun_eval[..., tf.newaxis],
false_fn=lambda: fun_eval,
)
|
def new_fun(X, **Ys):
Xs = tf.unstack(X, axis=-1)
fun_eval = old_fun(*Xs, **Ys)
if tf.rank(fun_eval) < tf.rank(X):
fun_eval = tf.expand_dims(fun_eval, axis=-1)
return fun_eval
|
https://github.com/GPflow/GPflow/issues/1547
|
Traceback (most recent call last):
File "gpflow_error.py", line 20, in <module>
go()
File "gpflow_error.py", line 16, in go
quad = compute()
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py", line 580, in __call__
result = self._call(*args, **kwds)
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py", line 627, in _call
self._initialize(args, kwds, add_initializers_to=initializers)
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py", line 506, in _initialize
*args, **kwds))
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/eager/function.py", line 2446, in _get_concrete_function_internal_garbage_collected
graph_function, _, _ = self._maybe_define_function(args, kwargs)
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/eager/function.py", line 2777, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/eager/function.py", line 2667, in _create_graph_function
capture_by_value=self._capture_by_value),
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/framework/func_graph.py", line 981, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py", line 441, in wrapped_fn
return weak_wrapped_fn().__wrapped__(*args, **kwds)
File "gpflow_error.py", line 11, in compute
quad = quadrature.ndiagquad([lambda *X: tf.exp(X[0])], num_gauss_hermite_points, [mu], [var])
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/gpflow/quadrature/deprecated.py", line 156, in ndiagquad
result = quadrature(funcs, Fmu, Fvar, **Ys)
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/gpflow/quadrature/base.py", line 52, in __call__
return [tf.reduce_sum(f(X, *args, **kwargs) * W, axis=-2) for f in fun]
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/gpflow/quadrature/base.py", line 52, in <listcomp>
return [tf.reduce_sum(f(X, *args, **kwargs) * W, axis=-2) for f in fun]
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/gpflow/quadrature/deprecated.py", line 141, in new_fun
if tf.rank(fun_eval) < tf.rank(X):
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/framework/ops.py", line 778, in __bool__
self._disallow_bool_casting()
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/framework/ops.py", line 542, in _disallow_bool_casting
"using a `tf.Tensor` as a Python `bool`")
File "/Users/nferguson/gpflow_error/lib/python3.7/site-packages/tensorflow/python/framework/ops.py", line 527, in _disallow_when_autograph_disabled
" Try decorating it directly with @tf.function.".format(task))
tensorflow.python.framework.errors_impl.OperatorNotAllowedInGraphError: using a `tf.Tensor` as a Python `bool` is not allowed: AutoGraph is disabled in this function. Try decorating it directly with @tf.function.
|
tensorflow.python.framework.errors_impl.OperatorNotAllowedInGraphError
|
def __init__(
self,
data: OutputData,
latent_dim: int,
X_data_mean: Optional[tf.Tensor] = None,
kernel: Optional[Kernel] = None,
mean_function: Optional[MeanFunction] = None,
):
"""
Initialise GPLVM object. This method only works with a Gaussian likelihood.
:param data: y data matrix, size N (number of points) x D (dimensions)
:param latent_dim: the number of latent dimensions (Q)
:param X_data_mean: latent positions ([N, Q]), for the initialisation of the latent space.
:param kernel: kernel specification, by default Squared Exponential
:param mean_function: mean function, by default None.
"""
if X_data_mean is None:
X_data_mean = pca_reduce(data, latent_dim)
num_latent_gps = X_data_mean.shape[1]
if num_latent_gps != latent_dim:
msg = "Passed in number of latent {0} does not match initial X {1}."
raise ValueError(msg.format(latent_dim, num_latent_gps))
if mean_function is None:
mean_function = Zero()
if kernel is None:
kernel = kernels.SquaredExponential(lengthscales=tf.ones((latent_dim,)))
if data.shape[1] < num_latent_gps:
raise ValueError("More latent dimensions than observed.")
gpr_data = (Parameter(X_data_mean), data_input_to_tensor(data))
super().__init__(gpr_data, kernel, mean_function=mean_function)
|
def __init__(
self,
data: OutputData,
latent_dim: int,
X_data_mean: Optional[tf.Tensor] = None,
kernel: Optional[Kernel] = None,
mean_function: Optional[MeanFunction] = None,
):
"""
Initialise GPLVM object. This method only works with a Gaussian likelihood.
:param data: y data matrix, size N (number of points) x D (dimensions)
:param latent_dim: the number of latent dimensions (Q)
:param X_data_mean: latent positions ([N, Q]), for the initialisation of the latent space.
:param kernel: kernel specification, by default Squared Exponential
:param mean_function: mean function, by default None.
"""
if X_data_mean is None:
X_data_mean = pca_reduce(data, latent_dim)
num_latent_gps = X_data_mean.shape[1]
if num_latent_gps != latent_dim:
msg = "Passed in number of latent {0} does not match initial X {1}."
raise ValueError(msg.format(latent_dim, num_latent_gps))
if mean_function is None:
mean_function = Zero()
if kernel is None:
kernel = kernels.SquaredExponential(lengthscales=tf.ones((latent_dim,)))
if data.shape[1] < num_latent_gps:
raise ValueError("More latent dimensions than observed.")
gpr_data = (Parameter(X_data_mean), data)
super().__init__(gpr_data, kernel, mean_function=mean_function)
|
https://github.com/GPflow/GPflow/issues/1439
|
Traceback (most recent call last):
File "main.py", line 177, in <module>
main(args)
File "main.py", line 64, in main
build_allele(args)
File "/path/to/1_model_sim/drivers.py", line 226, in build_allele
opt_model_list(m)
File "/path/to/1_model_sim/model.py", line 355, in opt_model_list
m.trainable_variables)
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/gpflow/optimizers/scipy.py", line 73, in minimize
func, initial_params, jac=True, method=method, **scipy_kwargs
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/scipy/optimize/_minimize.py", line 610, in minimize
callback=callback, **options)
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/scipy/optimize/lbfgsb.py", line 345, in _minimize_lbfgsb
f, g = func_and_grad(x)
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/scipy/optimize/lbfgsb.py", line 295, in func_and_grad
f = fun(x, *args)
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/scipy/optimize/optimize.py", line 327, in function_wrapper
return function(*(wrapper_args + args))
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/scipy/optimize/optimize.py", line 65, in __call__
fg = self.fun(x, *args)
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/gpflow/optimizers/scipy.py", line 95, in _eval
loss, grad = _tf_eval(tf.convert_to_tensor(x))
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/eager/def_function.py", line 568, in __call__
result = self._call(*args, **kwds)
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/eager/def_function.py", line 615, in _call
self._initialize(args, kwds, add_initializers_to=initializers)
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/eager/def_function.py", line 497, in _initialize
*args, **kwds))
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py", line 2389, in _get_concrete_function_internal_garbage_collected
graph_function, _, _ = self._maybe_define_function(args, kwargs)
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py", line 2703, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py", line 2593, in _create_graph_function
capture_by_value=self._capture_by_value),
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/eager/def_function.py", line 439, in wrapped_fn
return weak_wrapped_fn().__wrapped__(*args, **kwds)
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 968, in wrapper
raise e.ag_error_metadata.to_exception(e)
ValueError: in converted code:
/path/to/1_model_sim/venv/lib/python3.6/site-packages/gpflow/optimizers/scipy.py:88 _tf_eval *
loss, grads = _compute_loss_and_gradients(closure, variables)
/path/to/1_model_sim/venv/lib/python3.6/site-packages/gpflow/optimizers/scipy.py:145 _compute_loss_and_gradients *
loss = loss_closure()
/path/to/1_model_sim/model.py:354 None *
opt.minimize(lambda: - m.log_marginal_likelihood(),
/path/to/1_model_sim/venv/lib/python3.6/site-packages/gpflow/models/gpr.py:75 log_marginal_likelihood *
log_prob = multivariate_normal(Y, m, L)
/path/to/1_model_sim/venv/lib/python3.6/site-packages/gpflow/logdensities.py:95 multivariate_normal *
d = x - mu
/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/ops/math_ops.py:927 r_binary_op_wrapper
x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py:1314 convert_to_tensor
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_conversion_registry.py:52 _default_conversion_function
return constant_op.constant(value, dtype, name=name)
/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py:258 constant
allow_broadcast=True)
/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py:296 _constant_impl
allow_broadcast=allow_broadcast))
/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py:522 make_tensor_proto
"Cannot create a tensor proto whose content is larger than 2GB.")
ValueError: Cannot create a tensor proto whose content is larger than 2GB.
|
ValueError
|
def __init__(
self,
data: OutputData,
X_data_mean: tf.Tensor,
X_data_var: tf.Tensor,
kernel: Kernel,
num_inducing_variables: Optional[int] = None,
inducing_variable=None,
X_prior_mean=None,
X_prior_var=None,
):
"""
Initialise Bayesian GPLVM object. This method only works with a Gaussian likelihood.
:param data: data matrix, size N (number of points) x D (dimensions)
:param X_data_mean: initial latent positions, size N (number of points) x Q (latent dimensions).
:param X_data_var: variance of latent positions ([N, Q]), for the initialisation of the latent space.
:param kernel: kernel specification, by default Squared Exponential
:param num_inducing_variables: number of inducing points, M
:param inducing_variable: matrix of inducing points, size M (inducing points) x Q (latent dimensions). By default
random permutation of X_data_mean.
:param X_prior_mean: prior mean used in KL term of bound. By default 0. Same size as X_data_mean.
:param X_prior_var: prior variance used in KL term of bound. By default 1.
"""
num_data, num_latent_gps = X_data_mean.shape
super().__init__(kernel, likelihoods.Gaussian(), num_latent_gps=num_latent_gps)
self.data = data_input_to_tensor(data)
assert X_data_var.ndim == 2
self.X_data_mean = Parameter(X_data_mean)
self.X_data_var = Parameter(X_data_var, transform=positive())
self.num_data = num_data
self.output_dim = self.data.shape[-1]
assert np.all(X_data_mean.shape == X_data_var.shape)
assert X_data_mean.shape[0] == self.data.shape[0], "X mean and Y must be same size."
assert X_data_var.shape[0] == self.data.shape[0], "X var and Y must be same size."
if (inducing_variable is None) == (num_inducing_variables is None):
raise ValueError(
"BayesianGPLVM needs exactly one of `inducing_variable` and `num_inducing_variables`"
)
if inducing_variable is None:
# By default we initialize by subset of initial latent points
# Note that tf.random.shuffle returns a copy, it does not shuffle in-place
Z = tf.random.shuffle(X_data_mean)[:num_inducing_variables]
inducing_variable = InducingPoints(Z)
self.inducing_variable = inducingpoint_wrapper(inducing_variable)
assert X_data_mean.shape[1] == self.num_latent_gps
# deal with parameters for the prior mean variance of X
if X_prior_mean is None:
X_prior_mean = tf.zeros(
(self.num_data, self.num_latent_gps), dtype=default_float()
)
if X_prior_var is None:
X_prior_var = tf.ones((self.num_data, self.num_latent_gps))
self.X_prior_mean = tf.convert_to_tensor(
np.atleast_1d(X_prior_mean), dtype=default_float()
)
self.X_prior_var = tf.convert_to_tensor(
np.atleast_1d(X_prior_var), dtype=default_float()
)
assert self.X_prior_mean.shape[0] == self.num_data
assert self.X_prior_mean.shape[1] == self.num_latent_gps
assert self.X_prior_var.shape[0] == self.num_data
assert self.X_prior_var.shape[1] == self.num_latent_gps
|
def __init__(
self,
data: OutputData,
X_data_mean: tf.Tensor,
X_data_var: tf.Tensor,
kernel: Kernel,
num_inducing_variables: Optional[int] = None,
inducing_variable=None,
X_prior_mean=None,
X_prior_var=None,
):
"""
Initialise Bayesian GPLVM object. This method only works with a Gaussian likelihood.
:param data: data matrix, size N (number of points) x D (dimensions)
:param X_data_mean: initial latent positions, size N (number of points) x Q (latent dimensions).
:param X_data_var: variance of latent positions ([N, Q]), for the initialisation of the latent space.
:param kernel: kernel specification, by default Squared Exponential
:param num_inducing_variables: number of inducing points, M
:param inducing_variable: matrix of inducing points, size M (inducing points) x Q (latent dimensions). By default
random permutation of X_data_mean.
:param X_prior_mean: prior mean used in KL term of bound. By default 0. Same size as X_data_mean.
:param X_prior_var: prior variance used in KL term of bound. By default 1.
"""
num_data, num_latent_gps = X_data_mean.shape
super().__init__(kernel, likelihoods.Gaussian(), num_latent_gps=num_latent_gps)
self.data = data
assert X_data_var.ndim == 2
self.X_data_mean = Parameter(X_data_mean)
self.X_data_var = Parameter(X_data_var, transform=positive())
self.num_data = num_data
self.output_dim = data.shape[-1]
assert np.all(X_data_mean.shape == X_data_var.shape)
assert X_data_mean.shape[0] == data.shape[0], "X mean and Y must be same size."
assert X_data_var.shape[0] == data.shape[0], "X var and Y must be same size."
if (inducing_variable is None) == (num_inducing_variables is None):
raise ValueError(
"BayesianGPLVM needs exactly one of `inducing_variable` and `num_inducing_variables`"
)
if inducing_variable is None:
# By default we initialize by subset of initial latent points
# Note that tf.random.shuffle returns a copy, it does not shuffle in-place
Z = tf.random.shuffle(X_data_mean)[:num_inducing_variables]
inducing_variable = InducingPoints(Z)
self.inducing_variable = inducingpoint_wrapper(inducing_variable)
assert X_data_mean.shape[1] == self.num_latent_gps
# deal with parameters for the prior mean variance of X
if X_prior_mean is None:
X_prior_mean = tf.zeros(
(self.num_data, self.num_latent_gps), dtype=default_float()
)
if X_prior_var is None:
X_prior_var = tf.ones((self.num_data, self.num_latent_gps))
self.X_prior_mean = tf.convert_to_tensor(
np.atleast_1d(X_prior_mean), dtype=default_float()
)
self.X_prior_var = tf.convert_to_tensor(
np.atleast_1d(X_prior_var), dtype=default_float()
)
assert self.X_prior_mean.shape[0] == self.num_data
assert self.X_prior_mean.shape[1] == self.num_latent_gps
assert self.X_prior_var.shape[0] == self.num_data
assert self.X_prior_var.shape[1] == self.num_latent_gps
|
https://github.com/GPflow/GPflow/issues/1439
|
Traceback (most recent call last):
File "main.py", line 177, in <module>
main(args)
File "main.py", line 64, in main
build_allele(args)
File "/path/to/1_model_sim/drivers.py", line 226, in build_allele
opt_model_list(m)
File "/path/to/1_model_sim/model.py", line 355, in opt_model_list
m.trainable_variables)
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/gpflow/optimizers/scipy.py", line 73, in minimize
func, initial_params, jac=True, method=method, **scipy_kwargs
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/scipy/optimize/_minimize.py", line 610, in minimize
callback=callback, **options)
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/scipy/optimize/lbfgsb.py", line 345, in _minimize_lbfgsb
f, g = func_and_grad(x)
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/scipy/optimize/lbfgsb.py", line 295, in func_and_grad
f = fun(x, *args)
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/scipy/optimize/optimize.py", line 327, in function_wrapper
return function(*(wrapper_args + args))
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/scipy/optimize/optimize.py", line 65, in __call__
fg = self.fun(x, *args)
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/gpflow/optimizers/scipy.py", line 95, in _eval
loss, grad = _tf_eval(tf.convert_to_tensor(x))
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/eager/def_function.py", line 568, in __call__
result = self._call(*args, **kwds)
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/eager/def_function.py", line 615, in _call
self._initialize(args, kwds, add_initializers_to=initializers)
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/eager/def_function.py", line 497, in _initialize
*args, **kwds))
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py", line 2389, in _get_concrete_function_internal_garbage_collected
graph_function, _, _ = self._maybe_define_function(args, kwargs)
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py", line 2703, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py", line 2593, in _create_graph_function
capture_by_value=self._capture_by_value),
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/eager/def_function.py", line 439, in wrapped_fn
return weak_wrapped_fn().__wrapped__(*args, **kwds)
File "/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 968, in wrapper
raise e.ag_error_metadata.to_exception(e)
ValueError: in converted code:
/path/to/1_model_sim/venv/lib/python3.6/site-packages/gpflow/optimizers/scipy.py:88 _tf_eval *
loss, grads = _compute_loss_and_gradients(closure, variables)
/path/to/1_model_sim/venv/lib/python3.6/site-packages/gpflow/optimizers/scipy.py:145 _compute_loss_and_gradients *
loss = loss_closure()
/path/to/1_model_sim/model.py:354 None *
opt.minimize(lambda: - m.log_marginal_likelihood(),
/path/to/1_model_sim/venv/lib/python3.6/site-packages/gpflow/models/gpr.py:75 log_marginal_likelihood *
log_prob = multivariate_normal(Y, m, L)
/path/to/1_model_sim/venv/lib/python3.6/site-packages/gpflow/logdensities.py:95 multivariate_normal *
d = x - mu
/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/ops/math_ops.py:927 r_binary_op_wrapper
x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py:1314 convert_to_tensor
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_conversion_registry.py:52 _default_conversion_function
return constant_op.constant(value, dtype, name=name)
/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py:258 constant
allow_broadcast=True)
/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py:296 _constant_impl
allow_broadcast=allow_broadcast))
/path/to/1_model_sim/venv/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py:522 make_tensor_proto
"Cannot create a tensor proto whose content is larger than 2GB.")
ValueError: Cannot create a tensor proto whose content is larger than 2GB.
|
ValueError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.