after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def return_response(self, method, path, data, headers, response):
req_data = None
if method == "POST" and path == "/":
req_data = urlparse.parse_qs(to_str(data))
action = req_data.get("Action")[0]
if req_data:
if action == "DescribeStackResources":
if response.status_code < 300:
response_dict = xmltodict.parse(response.content)[
"DescribeStackResourcesResponse"
]
resources = response_dict["DescribeStackResourcesResult"][
"StackResources"
]
if not resources:
# Check if stack exists
stack_name = req_data.get("StackName")[0]
cloudformation_client = aws_stack.connect_to_service(
"cloudformation"
)
try:
cloudformation_client.describe_stacks(StackName=stack_name)
except Exception:
return error_response(
"Stack with id %s does not exist" % stack_name, code=404
)
if action == "DescribeStackResource":
if response.status_code >= 500:
# fix an error in moto where it fails with 500 if the stack does not exist
return error_response("Stack resource does not exist", code=404)
elif action == "CreateStack" or action == "UpdateStack":
if response.status_code >= 400 and response.status_code < 500:
return response
# run the actual deployment
template = template_deployer.template_to_json(
req_data.get("TemplateBody")[0]
)
template_deployer.deploy_template(template, req_data.get("StackName")[0])
|
def return_response(self, method, path, data, headers, response):
req_data = None
if method == "POST" and path == "/":
req_data = urlparse.parse_qs(to_str(data))
action = req_data.get("Action")[0]
if req_data:
if action == "DescribeStackResources":
if response.status_code < 300:
response_dict = xmltodict.parse(response.content)[
"DescribeStackResourcesResponse"
]
resources = response_dict["DescribeStackResourcesResult"][
"StackResources"
]
if not resources:
# Check if stack exists
stack_name = req_data.get("StackName")[0]
cloudformation_client = aws_stack.connect_to_service(
"cloudformation"
)
try:
cloudformation_client.describe_stacks(StackName=stack_name)
except Exception:
return error_response(
"Stack with id %s does not exist" % stack_name, code=404
)
if action == "DescribeStackResource":
if response.status_code >= 500:
# fix an error in moto where it fails with 500 if the stack does not exist
return error_response("Stack resource does not exist", code=404)
elif action == "CreateStack" or action == "UpdateStack":
# run the actual deployment
template = template_deployer.template_to_json(
req_data.get("TemplateBody")[0]
)
template_deployer.deploy_template(template, req_data.get("StackName")[0])
if response.status_code >= 400:
return make_response(action)
|
https://github.com/localstack/localstack/issues/395
|
2017-10-11T05:49:47:INFO:werkzeug: 192.168.99.1 - - [11/Oct/2017 05:49:47] "GET / HTTP/1.1" 200 -
2017-10-11T05:49:47:INFO:werkzeug: 192.168.99.1 - - [11/Oct/2017 05:49:47] "GET //192.168.99.103:8080/swagger.json HTTP/1.1" 200 -
2017-10-11T05:49:48:INFO:werkzeug: 192.168.99.1 - - [11/Oct/2017 05:49:48] "GET /img/localstack_icon.png HTTP/1.1" 200 -
2017-10-11T05:49:49:INFO:werkzeug: 192.168.99.1 - - [11/Oct/2017 05:49:49] "POST /graph HTTP/1.1" 500 -
Traceback (most recent call last):
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 1836, in __call__
return self.wsgi_app(environ, start_response)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 1820, in wsgi_app
response = self.make_response(self.handle_exception(e))
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 1403, in handle_exception
reraise(exc_type, exc_value, tb)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 1817, in wsgi_app
response = self.full_dispatch_request()
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 1477, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 1381, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 1475, in full_dispatch_request
rv = self.dispatch_request()
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 1461, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/opt/code/localstack/localstack/dashboard/api.py", line 37, in get_graph
graph = infra.get_graph(name_filter=data['nameFilter'], env=env)
File "/opt/code/localstack/localstack/dashboard/infra.py", line 432, in get_graph
domains = get_elasticsearch_domains(name_filter, pool=pool, env=env)
File "/opt/code/localstack/localstack/dashboard/infra.py", line 312, in get_elasticsearch_domains
parallelize(handle, out['DomainNames'])
File "/opt/code/localstack/localstack/utils/common.py", line 698, in parallelize
result = pool.map(func, list)
File "/usr/lib/python2.7/multiprocessing/pool.py", line 251, in map
return self.map_async(func, iterable, chunksize).get()
File "/usr/lib/python2.7/multiprocessing/pool.py", line 567, in get
raise self._value
KeyError: 'Endpoint'
|
KeyError
|
def forward_request(self, method, path, data, headers):
modified_data = None
# If this request contains streaming v4 authentication signatures, strip them from the message
# Related isse: https://github.com/localstack/localstack/issues/98
# TODO we should evaluate whether to replace moto s3 with scality/S3:
# https://github.com/scality/S3/issues/237
if headers.get("x-amz-content-sha256") == "STREAMING-AWS4-HMAC-SHA256-PAYLOAD":
modified_data = strip_chunk_signatures(data)
# POST requests to S3 may include a "${filename}" placeholder in the
# key, which should be replaced with an actual file name before storing.
if method == "POST":
original_data = modified_data or data
expanded_data = expand_multipart_filename(original_data, headers)
if expanded_data is not original_data:
modified_data = expanded_data
# persist this API call to disk
persistence.record("s3", method, path, data, headers)
parsed = urlparse.urlparse(path)
query = parsed.query
path = parsed.path
bucket = path.split("/")[1]
query_map = urlparse.parse_qs(query)
if query == "notification" or "notification" in query_map:
response = Response()
response.status_code = 200
if method == "GET":
# TODO check if bucket exists
result = '<NotificationConfiguration xmlns="%s">' % XMLNS_S3
if bucket in S3_NOTIFICATIONS:
notif = S3_NOTIFICATIONS[bucket]
events_string = "\n".join(
["<Event>%s</Event>" % e for e in notif["Event"]]
)
for dest in ["Queue", "Topic", "CloudFunction"]:
if dest in notif:
result += (
"""<{dest}Configuration>
<Id>{uid}</Id>
<{dest}>{endpoint}</{dest}>
{events}
</{dest}Configuration>"""
).format(
dest=dest,
uid=uuid.uuid4(),
endpoint=notif[dest],
events=events_string,
)
result += "</NotificationConfiguration>"
response._content = result
if method == "PUT":
parsed = xmltodict.parse(data)
notif_config = parsed.get("NotificationConfiguration")
S3_NOTIFICATIONS.pop(bucket, None)
for dest in ["Queue", "Topic", "CloudFunction"]:
config = notif_config.get("%sConfiguration" % (dest))
if config:
events = config.get("Event")
if isinstance(events, six.string_types):
events = [events]
notification_details = {
"Id": config.get("Id"),
"Event": events,
dest: config.get(dest),
"Filter": config.get("Filter"),
}
# TODO: what if we have multiple destinations - would we overwrite the config?
S3_NOTIFICATIONS[bucket] = clone(notification_details)
# return response for ?notification request
return response
if query == "cors" or "cors" in query_map:
if method == "GET":
return get_cors(bucket)
if method == "PUT":
return set_cors(bucket, data)
if method == "DELETE":
return delete_cors(bucket)
if modified_data:
return Request(data=modified_data, headers=headers, method=method)
return True
|
def forward_request(self, method, path, data, headers):
modified_data = None
# If this request contains streaming v4 authentication signatures, strip them from the message
# Related isse: https://github.com/localstack/localstack/issues/98
# TODO we should evaluate whether to replace moto s3 with scality/S3:
# https://github.com/scality/S3/issues/237
if headers.get("x-amz-content-sha256") == "STREAMING-AWS4-HMAC-SHA256-PAYLOAD":
modified_data = strip_chunk_signatures(data)
# POST requests to S3 may include a "${filename}" placeholder in the
# key, which should be replaced with an actual file name before storing.
if method == "POST":
original_data = modified_data or data
expanded_data = expand_multipart_filename(original_data, headers)
if expanded_data is not original_data:
modified_data = expanded_data
# persist this API call to disk
persistence.record("s3", method, path, data, headers)
parsed = urlparse.urlparse(path)
query = parsed.query
path = parsed.path
bucket = path.split("/")[1]
query_map = urlparse.parse_qs(query)
if query == "notification" or "notification" in query_map:
response = Response()
response.status_code = 200
if method == "GET":
# TODO check if bucket exists
result = '<NotificationConfiguration xmlns="%s">' % XMLNS_S3
if bucket in S3_NOTIFICATIONS:
notif = S3_NOTIFICATIONS[bucket]
events_string = "\n".join(
["<Event>%s</Event>" % e for e in notif["Event"]]
)
for dest in ["Queue", "Topic", "CloudFunction"]:
if dest in notif:
result += (
"""<{dest}Configuration>
<Id>{uid}</Id>
<{dest}>{endpoint}</{dest}>
{events}
</{dest}Configuration>"""
).format(
dest=dest,
uid=uuid.uuid4(),
endpoint=notif[dest],
events=events_string,
)
result += "</NotificationConfiguration>"
response._content = result
if method == "PUT":
parsed = xmltodict.parse(data)
notif_config = parsed.get("NotificationConfiguration")
for dest in ["Queue", "Topic", "CloudFunction"]:
config = notif_config.get("%sConfiguration" % (dest))
if config:
# TODO: what if we have multiple destinations - would we overwrite the config?
notification_details = {
"Id": config.get("Id"),
"Event": config.get("Event"),
dest: config.get(dest),
"Filter": config.get("Filter"),
}
S3_NOTIFICATIONS[bucket] = json.loads(
json.dumps(notification_details)
)
# return response for ?notification request
return response
if query == "cors" or "cors" in query_map:
if method == "GET":
return get_cors(bucket)
if method == "PUT":
return set_cors(bucket, data)
if method == "DELETE":
return delete_cors(bucket)
if modified_data:
return Request(data=modified_data, headers=headers, method=method)
return True
|
https://github.com/localstack/localstack/issues/396
|
2017-10-10T10:58:34:ERROR:localstack.services.generic_proxy: Error forwarding request: string indices must be integers Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/localstack/services/generic_proxy.py", line 194, in forward
path=path, data=data, headers=forward_headers, response=response)
File "/usr/local/lib/python3.6/site-packages/localstack/services/s3/s3_listener.py", line 436, in return_response
send_notifications(method, bucket_name, object_path)
File "/usr/local/lib/python3.6/site-packages/localstack/services/s3/s3_listener.py", line 122, in send_notifications
filter_rules_match(config.get('Filter'), object_path)):
File "/usr/local/lib/python3.6/site-packages/localstack/services/s3/s3_listener.py", line 50, in filter_rules_match
if rule['Name'] == 'prefix':
TypeError: string indices must be integers
|
TypeError
|
def filter_rules_match(filters, object_path):
"""check whether the given object path matches all of the given filters"""
filters = filters or {}
s3_filter = _get_s3_filter(filters)
for rule in s3_filter.get("FilterRule", []):
if rule["Name"] == "prefix":
if not prefix_with_slash(object_path).startswith(
prefix_with_slash(rule["Value"])
):
return False
elif rule["Name"] == "suffix":
if not object_path.endswith(rule["Value"]):
return False
else:
LOGGER.warning('Unknown filter name: "%s"' % rule["Name"])
return True
|
def filter_rules_match(filters, object_path):
"""check whether the given object path matches all of the given filters"""
filters = filters or {}
key_filter = filters.get("S3Key", filters.get("Key", {}))
for rule in key_filter.get("FilterRule", []):
if rule["Name"] == "prefix":
if not prefix_with_slash(object_path).startswith(
prefix_with_slash(rule["Value"])
):
return False
elif rule["Name"] == "suffix":
if not object_path.endswith(rule["Value"]):
return False
else:
LOGGER.warning('Unknown filter name: "%s"' % rule["Name"])
return True
|
https://github.com/localstack/localstack/issues/396
|
2017-10-10T10:58:34:ERROR:localstack.services.generic_proxy: Error forwarding request: string indices must be integers Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/localstack/services/generic_proxy.py", line 194, in forward
path=path, data=data, headers=forward_headers, response=response)
File "/usr/local/lib/python3.6/site-packages/localstack/services/s3/s3_listener.py", line 436, in return_response
send_notifications(method, bucket_name, object_path)
File "/usr/local/lib/python3.6/site-packages/localstack/services/s3/s3_listener.py", line 122, in send_notifications
filter_rules_match(config.get('Filter'), object_path)):
File "/usr/local/lib/python3.6/site-packages/localstack/services/s3/s3_listener.py", line 50, in filter_rules_match
if rule['Name'] == 'prefix':
TypeError: string indices must be integers
|
TypeError
|
def expand_multipart_filename(data, headers):
"""Replace instance of '${filename}' in key with given file name.
Data is given as multipart form submission bytes, and file name is
replace according to Amazon S3 documentation for Post uploads:
http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html
"""
_, params = cgi.parse_header(headers.get("Content-Type", ""))
if "boundary" not in params:
return data
boundary = params["boundary"].encode("ascii")
data_bytes = to_bytes(data)
filename = None
for disposition, _ in _iter_multipart_parts(data_bytes, boundary):
if disposition.get("name") == "file" and "filename" in disposition:
filename = disposition["filename"]
break
if filename is None:
# Found nothing, return unaltered
return data
for disposition, part in _iter_multipart_parts(data_bytes, boundary):
if disposition.get("name") == "key" and b"${filename}" in part:
search = boundary + part
replace = boundary + part.replace(b"${filename}", filename.encode("utf8"))
if search in data_bytes:
return data_bytes.replace(search, replace)
return data
|
def expand_multipart_filename(data, headers):
"""Replace instance of '${filename}' in key with given file name.
Data is given as multipart form submission bytes, and file name is
replace according to Amazon S3 documentation for Post uploads:
http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html
"""
_, params = cgi.parse_header(headers.get("Content-Type"))
if "boundary" not in params:
return data
boundary = params["boundary"].encode("ascii")
data_bytes = to_bytes(data)
filename = None
for disposition, _ in _iter_multipart_parts(data_bytes, boundary):
if disposition.get("name") == "file" and "filename" in disposition:
filename = disposition["filename"]
break
if filename is None:
# Found nothing, return unaltered
return data
for disposition, part in _iter_multipart_parts(data_bytes, boundary):
if disposition.get("name") == "key" and b"${filename}" in part:
search = boundary + part
replace = boundary + part.replace(b"${filename}", filename.encode("utf8"))
if search in data_bytes:
return data_bytes.replace(search, replace)
return data
|
https://github.com/localstack/localstack/issues/396
|
2017-10-10T10:58:34:ERROR:localstack.services.generic_proxy: Error forwarding request: string indices must be integers Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/localstack/services/generic_proxy.py", line 194, in forward
path=path, data=data, headers=forward_headers, response=response)
File "/usr/local/lib/python3.6/site-packages/localstack/services/s3/s3_listener.py", line 436, in return_response
send_notifications(method, bucket_name, object_path)
File "/usr/local/lib/python3.6/site-packages/localstack/services/s3/s3_listener.py", line 122, in send_notifications
filter_rules_match(config.get('Filter'), object_path)):
File "/usr/local/lib/python3.6/site-packages/localstack/services/s3/s3_listener.py", line 50, in filter_rules_match
if rule['Name'] == 'prefix':
TypeError: string indices must be integers
|
TypeError
|
def find_multipart_redirect_url(data, headers):
"""Return object key and redirect URL if they can be found.
Data is given as multipart form submission bytes, and redirect is found
in the success_action_redirect field according to Amazon S3
documentation for Post uploads:
http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html
"""
_, params = cgi.parse_header(headers.get("Content-Type", ""))
key, redirect_url = None, None
if "boundary" not in params:
return key, redirect_url
boundary = params["boundary"].encode("ascii")
data_bytes = to_bytes(data)
for disposition, part in _iter_multipart_parts(data_bytes, boundary):
if disposition.get("name") == "key":
_, value = part.split(b"\r\n\r\n", 1)
key = value.rstrip(b"\r\n--").decode("utf8")
if key:
for disposition, part in _iter_multipart_parts(data_bytes, boundary):
if disposition.get("name") == "success_action_redirect":
_, value = part.split(b"\r\n\r\n", 1)
redirect_url = value.rstrip(b"\r\n--").decode("utf8")
return key, redirect_url
|
def find_multipart_redirect_url(data, headers):
"""Return object key and redirect URL if they can be found.
Data is given as multipart form submission bytes, and redirect is found
in the success_action_redirect field according to Amazon S3
documentation for Post uploads:
http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html
"""
_, params = cgi.parse_header(headers.get("Content-Type"))
boundary = params["boundary"].encode("ascii")
data_bytes = to_bytes(data)
key, redirect_url = None, None
for disposition, part in _iter_multipart_parts(data_bytes, boundary):
if disposition.get("name") == "key":
_, value = part.split(b"\r\n\r\n", 1)
key = value.rstrip(b"\r\n--").decode("utf8")
if key:
for disposition, part in _iter_multipart_parts(data_bytes, boundary):
if disposition.get("name") == "success_action_redirect":
_, value = part.split(b"\r\n\r\n", 1)
redirect_url = value.rstrip(b"\r\n--").decode("utf8")
return key, redirect_url
|
https://github.com/localstack/localstack/issues/396
|
2017-10-10T10:58:34:ERROR:localstack.services.generic_proxy: Error forwarding request: string indices must be integers Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/localstack/services/generic_proxy.py", line 194, in forward
path=path, data=data, headers=forward_headers, response=response)
File "/usr/local/lib/python3.6/site-packages/localstack/services/s3/s3_listener.py", line 436, in return_response
send_notifications(method, bucket_name, object_path)
File "/usr/local/lib/python3.6/site-packages/localstack/services/s3/s3_listener.py", line 122, in send_notifications
filter_rules_match(config.get('Filter'), object_path)):
File "/usr/local/lib/python3.6/site-packages/localstack/services/s3/s3_listener.py", line 50, in filter_rules_match
if rule['Name'] == 'prefix':
TypeError: string indices must be integers
|
TypeError
|
def forward_request(self, method, path, data, headers):
modified_data = None
# If this request contains streaming v4 authentication signatures, strip them from the message
# Related isse: https://github.com/localstack/localstack/issues/98
# TODO we should evaluate whether to replace moto s3 with scality/S3:
# https://github.com/scality/S3/issues/237
if headers.get("x-amz-content-sha256") == "STREAMING-AWS4-HMAC-SHA256-PAYLOAD":
modified_data = strip_chunk_signatures(data)
# POST requests to S3 may include a "${filename}" placeholder in the
# key, which should be replaced with an actual file name before storing.
if method == "POST":
original_data = modified_data or data
expanded_data = expand_multipart_filename(original_data, headers)
if expanded_data is not original_data:
modified_data = expanded_data
# persist this API call to disk
persistence.record("s3", method, path, data, headers)
parsed = urlparse.urlparse(path)
query = parsed.query
path = parsed.path
bucket = path.split("/")[1]
query_map = urlparse.parse_qs(query)
if query == "notification" or "notification" in query_map:
response = Response()
response.status_code = 200
if method == "GET":
# TODO check if bucket exists
result = '<NotificationConfiguration xmlns="%s">' % XMLNS_S3
if bucket in S3_NOTIFICATIONS:
notif = S3_NOTIFICATIONS[bucket]
for dest in ["Queue", "Topic", "CloudFunction"]:
if dest in notif:
dest_dict = {
"%sConfiguration" % dest: {
"Id": uuid.uuid4(),
dest: notif[dest],
"Event": notif["Event"],
"Filter": notif["Filter"],
}
}
result += xmltodict.unparse(dest_dict, full_document=False)
result += "</NotificationConfiguration>"
response._content = result
if method == "PUT":
parsed = xmltodict.parse(data)
notif_config = parsed.get("NotificationConfiguration")
S3_NOTIFICATIONS.pop(bucket, None)
for dest in ["Queue", "Topic", "CloudFunction"]:
config = notif_config.get("%sConfiguration" % (dest))
if config:
events = config.get("Event")
if isinstance(events, six.string_types):
events = [events]
event_filter = config.get("Filter", {})
# make sure FilterRule is an array
s3_filter = _get_s3_filter(event_filter)
if s3_filter and not isinstance(
s3_filter.get("FilterRule", []), list
):
s3_filter["FilterRule"] = [s3_filter["FilterRule"]]
# create final details dict
notification_details = {
"Id": config.get("Id"),
"Event": events,
dest: config.get(dest),
"Filter": event_filter,
}
# TODO: what if we have multiple destinations - would we overwrite the config?
S3_NOTIFICATIONS[bucket] = clone(notification_details)
# return response for ?notification request
return response
if query == "cors" or "cors" in query_map:
if method == "GET":
return get_cors(bucket)
if method == "PUT":
return set_cors(bucket, data)
if method == "DELETE":
return delete_cors(bucket)
if modified_data:
return Request(data=modified_data, headers=headers, method=method)
return True
|
def forward_request(self, method, path, data, headers):
modified_data = None
# If this request contains streaming v4 authentication signatures, strip them from the message
# Related isse: https://github.com/localstack/localstack/issues/98
# TODO we should evaluate whether to replace moto s3 with scality/S3:
# https://github.com/scality/S3/issues/237
if headers.get("x-amz-content-sha256") == "STREAMING-AWS4-HMAC-SHA256-PAYLOAD":
modified_data = strip_chunk_signatures(data)
# POST requests to S3 may include a "${filename}" placeholder in the
# key, which should be replaced with an actual file name before storing.
if method == "POST":
original_data = modified_data or data
expanded_data = expand_multipart_filename(original_data, headers)
if expanded_data is not original_data:
modified_data = expanded_data
# persist this API call to disk
persistence.record("s3", method, path, data, headers)
parsed = urlparse.urlparse(path)
query = parsed.query
path = parsed.path
bucket = path.split("/")[1]
query_map = urlparse.parse_qs(query)
if query == "notification" or "notification" in query_map:
response = Response()
response.status_code = 200
if method == "GET":
# TODO check if bucket exists
result = '<NotificationConfiguration xmlns="%s">' % XMLNS_S3
if bucket in S3_NOTIFICATIONS:
notif = S3_NOTIFICATIONS[bucket]
for dest in ["Queue", "Topic", "CloudFunction"]:
if dest in notif:
dest_dict = {
"%sConfiguration" % dest: {
"Id": uuid.uuid4(),
dest: notif[dest],
"Event": notif["Event"],
"Filter": notif["Filter"],
}
}
result += xmltodict.unparse(dest_dict, full_document=False)
result += "</NotificationConfiguration>"
response._content = result
if method == "PUT":
parsed = xmltodict.parse(data)
notif_config = parsed.get("NotificationConfiguration")
S3_NOTIFICATIONS.pop(bucket, None)
for dest in ["Queue", "Topic", "CloudFunction"]:
config = notif_config.get("%sConfiguration" % (dest))
if config:
events = config.get("Event")
if isinstance(events, six.string_types):
events = [events]
notification_details = {
"Id": config.get("Id"),
"Event": events,
dest: config.get(dest),
"Filter": config.get("Filter", {}),
}
# TODO: what if we have multiple destinations - would we overwrite the config?
S3_NOTIFICATIONS[bucket] = clone(notification_details)
# return response for ?notification request
return response
if query == "cors" or "cors" in query_map:
if method == "GET":
return get_cors(bucket)
if method == "PUT":
return set_cors(bucket, data)
if method == "DELETE":
return delete_cors(bucket)
if modified_data:
return Request(data=modified_data, headers=headers, method=method)
return True
|
https://github.com/localstack/localstack/issues/396
|
2017-10-10T10:58:34:ERROR:localstack.services.generic_proxy: Error forwarding request: string indices must be integers Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/localstack/services/generic_proxy.py", line 194, in forward
path=path, data=data, headers=forward_headers, response=response)
File "/usr/local/lib/python3.6/site-packages/localstack/services/s3/s3_listener.py", line 436, in return_response
send_notifications(method, bucket_name, object_path)
File "/usr/local/lib/python3.6/site-packages/localstack/services/s3/s3_listener.py", line 122, in send_notifications
filter_rules_match(config.get('Filter'), object_path)):
File "/usr/local/lib/python3.6/site-packages/localstack/services/s3/s3_listener.py", line 50, in filter_rules_match
if rule['Name'] == 'prefix':
TypeError: string indices must be integers
|
TypeError
|
def expand_multipart_filename(data, headers):
"""Replace instance of '${filename}' in key with given file name.
Data is given as multipart form submission bytes, and file name is
replace according to Amazon S3 documentation for Post uploads:
http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html
"""
_, params = cgi.parse_header(headers.get("Content-Type"))
if "boundary" not in params:
return data
boundary = params["boundary"].encode("ascii")
data_bytes = to_bytes(data)
filename = None
for disposition, _ in _iter_multipart_parts(data_bytes, boundary):
if disposition.get("name") == "file" and "filename" in disposition:
filename = disposition["filename"]
break
if filename is None:
# Found nothing, return unaltered
return data
for disposition, part in _iter_multipart_parts(data_bytes, boundary):
if disposition.get("name") == "key" and b"${filename}" in part:
search = boundary + part
replace = boundary + part.replace(b"${filename}", filename.encode("utf8"))
if search in data_bytes:
return data_bytes.replace(search, replace)
return data
|
def expand_multipart_filename(data, headers):
"""Replace instance of '${filename}' in key with given file name.
Data is given as multipart form submission bytes, and file name is
replace according to Amazon S3 documentation for Post uploads:
http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html
"""
_, params = cgi.parse_header(headers.get("Content-Type"))
boundary = params["boundary"].encode("ascii")
data_bytes = to_bytes(data)
filename = None
for disposition, _ in _iter_multipart_parts(data_bytes, boundary):
if disposition.get("name") == "file" and "filename" in disposition:
filename = disposition["filename"]
break
if filename is None:
# Found nothing, return unaltered
return data
for disposition, part in _iter_multipart_parts(data_bytes, boundary):
if disposition.get("name") == "key" and b"${filename}" in part:
search = boundary + part
replace = boundary + part.replace(b"${filename}", filename.encode("utf8"))
if search in data_bytes:
return data_bytes.replace(search, replace)
return data
|
https://github.com/localstack/localstack/issues/310
|
2017-09-11T04:24:30:ERROR:localstack.services.generic_proxy: Error forwarding request: 'boundary' Traceback (most recent call last):
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 166, in forward
path=path, data=data, headers=forward_headers)
File "/opt/code/localstack/localstack/services/s3/s3_listener.py", line 345, in forward_request
expanded_data = expand_multipart_filename(original_data, headers)
File "/opt/code/localstack/localstack/services/s3/s3_listener.py", line 261, in expand_multipart_filename
boundary = params['boundary'].encode('ascii')
KeyError: 'boundary'
|
KeyError
|
def post_request():
action = request.headers.get("x-amz-target")
data = json.loads(to_str(request.data))
result = {}
kinesis = aws_stack.connect_to_service("kinesis")
if action == "%s.ListStreams" % ACTION_HEADER_PREFIX:
result = {
"Streams": list(DDB_STREAMS.values()),
"LastEvaluatedStreamArn": "TODO",
}
elif action == "%s.DescribeStream" % ACTION_HEADER_PREFIX:
for stream in DDB_STREAMS.values():
if stream["StreamArn"] == data["StreamArn"]:
result = {"StreamDescription": stream}
# get stream details
dynamodb = aws_stack.connect_to_service("dynamodb")
table_name = table_name_from_stream_arn(stream["StreamArn"])
stream_name = get_kinesis_stream_name(table_name)
stream_details = kinesis.describe_stream(StreamName=stream_name)
table_details = dynamodb.describe_table(TableName=table_name)
stream["KeySchema"] = table_details["Table"]["KeySchema"]
stream["Shards"] = stream_details["StreamDescription"]["Shards"]
break
if not result:
return error_response(
"Requested resource not found", error_type="ResourceNotFoundException"
)
elif action == "%s.GetShardIterator" % ACTION_HEADER_PREFIX:
# forward request to Kinesis API
stream_name = stream_name_from_stream_arn(data["StreamArn"])
result = kinesis.get_shard_iterator(
StreamName=stream_name,
ShardId=data["ShardId"],
ShardIteratorType=data["ShardIteratorType"],
)
elif action == "%s.GetRecords" % ACTION_HEADER_PREFIX:
kinesis_records = kinesis.get_records(**data)
result = {"Records": []}
for record in kinesis_records["Records"]:
result["Records"].append(json.loads(to_str(record["Data"])))
else:
print('WARNING: Unknown operation "%s"' % action)
return jsonify(result)
|
def post_request():
action = request.headers.get("x-amz-target")
data = json.loads(to_str(request.data))
result = None
kinesis = aws_stack.connect_to_service("kinesis")
if action == "%s.ListStreams" % ACTION_HEADER_PREFIX:
result = {
"Streams": list(DDB_STREAMS.values()),
"LastEvaluatedStreamArn": "TODO",
}
elif action == "%s.DescribeStream" % ACTION_HEADER_PREFIX:
for stream in DDB_STREAMS.values():
if stream["StreamArn"] == data["StreamArn"]:
result = {"StreamDescription": stream}
# get stream details
dynamodb = aws_stack.connect_to_service("dynamodb")
table_name = table_name_from_stream_arn(stream["StreamArn"])
stream_name = get_kinesis_stream_name(table_name)
stream_details = kinesis.describe_stream(StreamName=stream_name)
table_details = dynamodb.describe_table(TableName=table_name)
stream["KeySchema"] = table_details["Table"]["KeySchema"]
stream["Shards"] = stream_details["StreamDescription"]["Shards"]
break
elif action == "%s.GetShardIterator" % ACTION_HEADER_PREFIX:
# forward request to Kinesis API
stream_name = stream_name_from_stream_arn(data["StreamArn"])
result = kinesis.get_shard_iterator(
StreamName=stream_name,
ShardId=data["ShardId"],
ShardIteratorType=data["ShardIteratorType"],
)
elif action == "%s.GetRecords" % ACTION_HEADER_PREFIX:
kinesis_records = kinesis.get_records(**data)
result = {"Records": []}
for record in kinesis_records["Records"]:
result["Records"].append(json.loads(to_str(record["Data"])))
else:
print('WARNING: Unknown operation "%s"' % action)
return jsonify(result)
|
https://github.com/localstack/localstack/issues/219
|
$ localstack start
Starting local dev environment. CTRL-C to quit.
Error starting infrastructure: 'zipimport.zipimporter' object has no attribute 'path'
Traceback (most recent call last):
File ".../bin/localstack", line 86, in <module>
infra.start_infra()
File ".../lib/python3.6/site-packages/localstack/services/infra.py", line 362, in start_infra
raise e
File ".../lib/python3.6/site-packages/localstack/services/infra.py", line 306, in start_infra
load_plugins()
File ".../lib/python3.6/site-packages/localstack/services/infra.py", line 100, in load_plugins
file_path = '%s/%s/plugins.py' % (module[0].path, module[1])
AttributeError: 'zipimport.zipimporter' object has no attribute 'path'
|
AttributeError
|
def forward(self, method):
path = self.path
if "://" in path:
path = "/" + path.split("://", 1)[1].split("/", 1)[1]
proxy_url = "http://%s%s" % (self.proxy.forward_host, path)
target_url = self.path
if "://" not in target_url:
target_url = "http://%s%s" % (self.proxy.forward_host, target_url)
data = None
if method in ["POST", "PUT", "PATCH"]:
data_string = self.data_bytes
try:
if not isinstance(data_string, string_types):
data_string = data_string.decode(DEFAULT_ENCODING)
data = json.loads(data_string)
except Exception as e:
# unable to parse JSON, fallback to verbatim string/bytes
data = data_string
forward_headers = CaseInsensitiveDict(self.headers)
# update original "Host" header
forward_headers["host"] = urlparse(target_url).netloc
try:
response = None
modified_request = None
# update listener (pre-invocation)
if self.proxy.update_listener:
listener_result = self.proxy.update_listener(
method=method,
path=path,
data=data,
headers=forward_headers,
return_forward_info=True,
)
if isinstance(listener_result, Response):
response = listener_result
elif isinstance(listener_result, Request):
modified_request = listener_result
data = modified_request.data
forward_headers = modified_request.headers
elif listener_result is not True:
# get status code from response, or use Bad Gateway status code
code = listener_result if isinstance(listener_result, int) else 503
self.send_response(code)
self.end_headers()
return
if response is None:
if modified_request:
response = self.method(
proxy_url,
data=modified_request.data,
headers=modified_request.headers,
)
else:
response = self.method(
proxy_url, data=self.data_bytes, headers=forward_headers
)
# update listener (post-invocation)
if self.proxy.update_listener:
updated_response = self.proxy.update_listener(
method=method,
path=path,
data=data,
headers=forward_headers,
response=response,
)
if isinstance(updated_response, Response):
response = updated_response
# copy headers and return response
self.send_response(response.status_code)
for header_key, header_value in iteritems(response.headers):
self.send_header(header_key, header_value)
self.end_headers()
self.wfile.write(bytes_(response.content))
except Exception as e:
if not self.proxy.quiet:
LOGGER.exception("Error forwarding request: %s" % str(e))
|
def forward(self, method):
path = self.path
if "://" in path:
path = "/" + path.split("://", 1)[1].split("/", 1)[1]
proxy_url = "http://%s%s" % (self.proxy.forward_host, path)
target_url = self.path
if "://" not in target_url:
target_url = "http://%s%s" % (self.proxy.forward_host, target_url)
data = None
if method in ["POST", "PUT", "PATCH"]:
data_string = self.data_bytes
try:
if not isinstance(data_string, string_types):
data_string = data_string.decode(DEFAULT_ENCODING)
data = json.loads(data_string)
except Exception as e:
# unable to parse JSON, fallback to verbatim string/bytes
data = data_string
forward_headers = dict(self.headers)
# update original "Host" header
forward_headers["host"] = urlparse(target_url).netloc
try:
response = None
modified_request = None
# update listener (pre-invocation)
if self.proxy.update_listener:
listener_result = self.proxy.update_listener(
method=method,
path=path,
data=data,
headers=forward_headers,
return_forward_info=True,
)
if isinstance(listener_result, Response):
response = listener_result
elif isinstance(listener_result, Request):
modified_request = listener_result
elif listener_result is not True:
# get status code from response, or use Bad Gateway status code
code = listener_result if isinstance(listener_result, int) else 503
self.send_response(code)
self.end_headers()
return
if response is None:
if modified_request:
response = self.method(
proxy_url,
data=modified_request.data,
headers=modified_request.headers,
)
else:
response = self.method(
proxy_url, data=self.data_bytes, headers=forward_headers
)
# update listener (post-invocation)
if self.proxy.update_listener:
updated_response = self.proxy.update_listener(
method=method,
path=path,
data=data,
headers=self.headers,
response=response,
)
if isinstance(updated_response, Response):
response = updated_response
# copy headers and return response
self.send_response(response.status_code)
for header_key, header_value in iteritems(response.headers):
self.send_header(header_key, header_value)
self.end_headers()
self.wfile.write(bytes_(response.content))
except Exception as e:
if not self.proxy.quiet:
LOGGER.exception("Error forwarding request: %s" % str(e))
|
https://github.com/localstack/localstack/issues/83
|
ERROR:localstack.mock.generic_proxy:Error forwarding request: Traceback (most recent call last):
File "/opt/code/localstack/localstack/mock/generic_proxy.py", line 88, in forward
data=data, headers=self.headers, return_forward_info=True)
File "/opt/code/localstack/localstack/mock/proxy/sns_listener.py", line 37, in update_sns
queue_url = aws_stack.get_sqs_queue_url(queue_name)
File "/opt/code/localstack/localstack/utils/aws/aws_stack.py", line 285, in get_sqs_queue_url
response = client.get_queue_url(QueueName=queue_name)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/client.py", line 253, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/client.py", line 531, in _make_api_call
operation_model, request_dict)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/endpoint.py", line 141, in make_request
return self._send_request(request_dict, operation_model)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/endpoint.py", line 168, in _send_request
request, operation_model, attempts)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/endpoint.py", line 233, in _get_response
response_dict, operation_model.output_shape)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/parsers.py", line 210, in parse
parsed = self._do_error_parse(response, shape)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/parsers.py", line 438, in _do_error_parse
root = self._parse_xml_string_to_dom(xml_contents)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/parsers.py", line 390, in _parse_xml_string_to_dom
"invalid XML received:\n%s" % (e, xml_string))
ResponseParserError: Unable to parse response (syntax error: line 1, column 54), invalid XML received:
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
<title>500 Internal Server Error</title>
<h1>Internal Server Error</h1>
<p>The server encountered an internal error and was unable to complete your request. Either the server is overloaded or there is an error in the application.</p>
|
ResponseParserError
|
def update_cloudformation(
method, path, data, headers, response=None, return_forward_info=False
):
req_data = None
if method == "POST" and path == "/":
req_data = urlparse.parse_qs(data)
action = req_data.get("Action")[0]
if return_forward_info:
if req_data:
if action == "CreateChangeSet":
return create_change_set(req_data)
elif action == "DescribeChangeSet":
return describe_change_set(req_data)
elif action == "ExecuteChangeSet":
return execute_change_set(req_data)
elif action == "UpdateStack" and req_data.get("TemplateURL"):
# Temporary fix until the moto CF backend can handle TemplateURL (currently fails)
url = re.sub(
r"https?://s3\.amazonaws\.com",
aws_stack.get_local_service_url("s3"),
req_data.get("TemplateURL")[0],
)
req_data["TemplateBody"] = requests.get(url).content
modified_data = urlparse.urlencode(req_data, doseq=True)
return Request(data=modified_data, headers=headers, method=method)
return True
if req_data:
if action == "DescribeStackResources":
if response.status_code < 300:
response_dict = xmltodict.parse(response.content)[
"DescribeStackResourcesResponse"
]
resources = response_dict["DescribeStackResourcesResult"][
"StackResources"
]
if not resources:
# Check if stack exists
stack_name = req_data.get("StackName")[0]
cloudformation_client = aws_stack.connect_to_service(
"cloudformation"
)
try:
cloudformation_client.describe_stacks(StackName=stack_name)
except Exception as e:
return error_response(
"Stack with id %s does not exist" % stack_name, code=404
)
if action == "DescribeStackResource":
if response.status_code >= 500:
# fix an error in moto where it fails with 500 if the stack does not exist
return error_response("Stack resource does not exist", code=404)
elif action == "CreateStack" or action == "UpdateStack":
# run the actual deployment
template = template_deployer.template_to_json(
req_data.get("TemplateBody")[0]
)
template_deployer.deploy_template(template, req_data.get("StackName")[0])
if response.status_code >= 400:
return make_response(action)
|
def update_cloudformation(
method, path, data, headers, response=None, return_forward_info=False
):
req_data = None
if method == "POST" and path == "/":
req_data = urlparse.parse_qs(data)
action = req_data.get("Action")[0]
if return_forward_info:
if req_data:
if action == "CreateChangeSet":
return create_change_set(req_data)
elif action == "DescribeChangeSet":
return describe_change_set(req_data)
elif action == "ExecuteChangeSet":
return execute_change_set(req_data)
return True
if req_data:
if action == "DescribeStackResources":
response_dict = xmltodict.parse(response.content)[
"DescribeStackResourcesResponse"
]
resources = response_dict["DescribeStackResourcesResult"]["StackResources"]
if not resources:
# TODO: check if stack exists
return error_response("Stack does not exist", code=404)
if action == "DescribeStackResource":
if response.status_code >= 500:
# fix an error in moto where it fails with 500 if the stack does not exist
return error_response("Stack resource does not exist", code=404)
elif action == "CreateStack" or action == "UpdateStack":
if response.status_code in range(200, 300):
# run the actual deployment
template = template_deployer.template_to_json(
req_data.get("TemplateBody")[0]
)
template_deployer.deploy_template(template)
|
https://github.com/localstack/localstack/issues/83
|
ERROR:localstack.mock.generic_proxy:Error forwarding request: Traceback (most recent call last):
File "/opt/code/localstack/localstack/mock/generic_proxy.py", line 88, in forward
data=data, headers=self.headers, return_forward_info=True)
File "/opt/code/localstack/localstack/mock/proxy/sns_listener.py", line 37, in update_sns
queue_url = aws_stack.get_sqs_queue_url(queue_name)
File "/opt/code/localstack/localstack/utils/aws/aws_stack.py", line 285, in get_sqs_queue_url
response = client.get_queue_url(QueueName=queue_name)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/client.py", line 253, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/client.py", line 531, in _make_api_call
operation_model, request_dict)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/endpoint.py", line 141, in make_request
return self._send_request(request_dict, operation_model)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/endpoint.py", line 168, in _send_request
request, operation_model, attempts)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/endpoint.py", line 233, in _get_response
response_dict, operation_model.output_shape)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/parsers.py", line 210, in parse
parsed = self._do_error_parse(response, shape)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/parsers.py", line 438, in _do_error_parse
root = self._parse_xml_string_to_dom(xml_contents)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/parsers.py", line 390, in _parse_xml_string_to_dom
"invalid XML received:\n%s" % (e, xml_string))
ResponseParserError: Unable to parse response (syntax error: line 1, column 54), invalid XML received:
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
<title>500 Internal Server Error</title>
<h1>Internal Server Error</h1>
<p>The server encountered an internal error and was unable to complete your request. Either the server is overloaded or there is an error in the application.</p>
|
ResponseParserError
|
def update_dynamodb(
method, path, data, headers, response=None, return_forward_info=False
):
if return_forward_info:
if random.random() < config.DYNAMODB_ERROR_PROBABILITY:
return dynamodb_error_response(data)
return True
# update table definitions
if data and "TableName" in data and "KeySchema" in data:
TABLE_DEFINITIONS[data["TableName"]] = data
action = headers.get("X-Amz-Target")
if not action:
return
response_data = json.loads(to_str(response.content))
record = {
"eventID": "1",
"eventVersion": "1.0",
"dynamodb": {
"StreamViewType": "NEW_AND_OLD_IMAGES",
"SequenceNumber": "1",
"SizeBytes": -1,
},
"awsRegion": DEFAULT_REGION,
"eventSource": "aws:dynamodb",
}
event = {"Records": [record]}
if action == "DynamoDB_20120810.UpdateItem":
req = {"TableName": data["TableName"], "Key": data["Key"]}
new_item = aws_stack.dynamodb_get_item_raw(req)
if "Item" not in new_item:
if "message" in new_item:
ddb_client = aws_stack.connect_to_service("dynamodb")
table_names = ddb_client.list_tables()["TableNames"]
msg = "Unable to get item from DynamoDB (existing tables: %s): %s" % (
table_names,
new_item["message"],
)
LOGGER.warning(msg)
return
record["eventName"] = "MODIFY"
record["dynamodb"]["Keys"] = data["Key"]
record["dynamodb"]["NewImage"] = new_item["Item"]
elif action == "DynamoDB_20120810.PutItem":
record["eventName"] = "INSERT"
keys = dynamodb_extract_keys(item=data["Item"], table_name=data["TableName"])
record["dynamodb"]["Keys"] = keys
record["dynamodb"]["NewImage"] = data["Item"]
elif action == "DynamoDB_20120810.DeleteItem":
record["eventName"] = "REMOVE"
record["dynamodb"]["Keys"] = data["Key"]
elif action == "DynamoDB_20120810.CreateTable":
if "StreamSpecification" in data:
stream = data["StreamSpecification"]
enabled = stream.get("StreamEnabled")
if enabled not in [False, "False"]:
table_name = data["TableName"]
view_type = stream["StreamViewType"]
dynamodbstreams_api.add_dynamodb_stream(
table_name=table_name, view_type=view_type, enabled=enabled
)
return
else:
# nothing to do
return
record["eventSourceARN"] = aws_stack.dynamodb_table_arn(data["TableName"])
sources = lambda_api.get_event_sources(source_arn=record["eventSourceARN"])
if len(sources) > 0:
pass
for src in sources:
func_to_call = lambda_api.lambda_arn_to_function[src["FunctionArn"]]
lambda_api.run_lambda(
func_to_call, event=event, context={}, func_arn=src["FunctionArn"]
)
|
def update_dynamodb(
method, path, data, headers, response=None, return_forward_info=False
):
if return_forward_info:
if random.random() < config.DYNAMODB_ERROR_PROBABILITY:
return dynamodb_error_response(data)
return True
# update table definitions
if data and "TableName" in data and "KeySchema" in data:
TABLE_DEFINITIONS[data["TableName"]] = data
action = headers.get("X-Amz-Target")
if not action:
return
response_data = json.loads(to_str(response.content))
record = {
"eventID": "1",
"eventVersion": "1.0",
"dynamodb": {
"StreamViewType": "NEW_AND_OLD_IMAGES",
"SequenceNumber": "1",
"SizeBytes": -1,
},
"awsRegion": DEFAULT_REGION,
"eventSource": "aws:dynamodb",
}
event = {"Records": [record]}
if action == "DynamoDB_20120810.UpdateItem":
req = {"TableName": data["TableName"], "Key": data["Key"]}
new_item = aws_stack.dynamodb_get_item_raw(req)
if "Item" not in new_item:
if "message" in new_item:
ddb_client = aws_stack.connect_to_service("dynamodb")
table_names = ddb_client.list_tables()["TableNames"]
msg = "Unable to get item from DynamoDB (existing tables: %s): %s" % (
table_names,
new_item["message"],
)
LOGGER.warning(msg)
return
record["eventName"] = "MODIFY"
record["dynamodb"]["Keys"] = data["Key"]
record["dynamodb"]["NewImage"] = new_item["Item"]
elif action == "DynamoDB_20120810.PutItem":
record["eventName"] = "INSERT"
keys = dynamodb_extract_keys(item=data["Item"], table_name=data["TableName"])
record["dynamodb"]["Keys"] = keys
record["dynamodb"]["NewImage"] = data["Item"]
elif action == "DynamoDB_20120810.DeleteItem":
record["eventName"] = "REMOVE"
record["dynamodb"]["Keys"] = data["Key"]
elif action == "DynamoDB_20120810.CreateTable":
if "StreamSpecification" in data:
stream = data["StreamSpecification"]
enabled = stream["StreamEnabled"]
if enabled:
table_name = data["TableName"]
view_type = stream["StreamViewType"]
dynamodbstreams_api.add_dynamodb_stream(
table_name=table_name, view_type=view_type, enabled=enabled
)
return
else:
# nothing to do
return
record["eventSourceARN"] = aws_stack.dynamodb_table_arn(data["TableName"])
sources = lambda_api.get_event_sources(source_arn=record["eventSourceARN"])
if len(sources) > 0:
pass
for src in sources:
func_to_call = lambda_api.lambda_arn_to_function[src["FunctionArn"]]
lambda_api.run_lambda(
func_to_call, event=event, context={}, func_arn=src["FunctionArn"]
)
|
https://github.com/localstack/localstack/issues/83
|
ERROR:localstack.mock.generic_proxy:Error forwarding request: Traceback (most recent call last):
File "/opt/code/localstack/localstack/mock/generic_proxy.py", line 88, in forward
data=data, headers=self.headers, return_forward_info=True)
File "/opt/code/localstack/localstack/mock/proxy/sns_listener.py", line 37, in update_sns
queue_url = aws_stack.get_sqs_queue_url(queue_name)
File "/opt/code/localstack/localstack/utils/aws/aws_stack.py", line 285, in get_sqs_queue_url
response = client.get_queue_url(QueueName=queue_name)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/client.py", line 253, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/client.py", line 531, in _make_api_call
operation_model, request_dict)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/endpoint.py", line 141, in make_request
return self._send_request(request_dict, operation_model)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/endpoint.py", line 168, in _send_request
request, operation_model, attempts)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/endpoint.py", line 233, in _get_response
response_dict, operation_model.output_shape)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/parsers.py", line 210, in parse
parsed = self._do_error_parse(response, shape)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/parsers.py", line 438, in _do_error_parse
root = self._parse_xml_string_to_dom(xml_contents)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/parsers.py", line 390, in _parse_xml_string_to_dom
"invalid XML received:\n%s" % (e, xml_string))
ResponseParserError: Unable to parse response (syntax error: line 1, column 54), invalid XML received:
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
<title>500 Internal Server Error</title>
<h1>Internal Server Error</h1>
<p>The server encountered an internal error and was unable to complete your request. Either the server is overloaded or there is an error in the application.</p>
|
ResponseParserError
|
def update_s3(method, path, data, headers, response=None, return_forward_info=False):
if return_forward_info:
modified_data = None
# If this request contains streaming v4 authentication signatures, strip them from the message
# Related isse: https://github.com/atlassian/localstack/issues/98
# TODO we should evaluate whether to replace moto s3 with scality/S3:
# https://github.com/scality/S3/issues/237
if headers.get("x-amz-content-sha256") == "STREAMING-AWS4-HMAC-SHA256-PAYLOAD":
modified_data = strip_chunk_signatures(data)
# persist this API call to disk
persistence.record("s3", method, path, data, headers)
parsed = urlparse.urlparse(path)
query = parsed.query
path = parsed.path
bucket = path.split("/")[1]
query_map = urlparse.parse_qs(query)
if method == "PUT" and (query == "notification" or "notification" in query_map):
tree = ET.fromstring(data)
queue_config = tree.find("{%s}QueueConfiguration" % XMLNS_S3)
if len(queue_config):
S3_NOTIFICATIONS[bucket] = {
"Id": get_xml_text(queue_config, "Id"),
"Event": get_xml_text(queue_config, "Event", ns=XMLNS_S3),
"Queue": get_xml_text(queue_config, "Queue", ns=XMLNS_S3),
"Topic": get_xml_text(queue_config, "Topic", ns=XMLNS_S3),
"CloudFunction": get_xml_text(
queue_config, "CloudFunction", ns=XMLNS_S3
),
}
if query == "cors" or "cors" in query_map:
if method == "GET":
return get_cors(bucket)
if method == "PUT":
return set_cors(bucket, data)
if method == "DELETE":
return delete_cors(bucket)
if modified_data:
return Request(data=modified_data, headers=headers, method=method)
return True
# get subscribers and send bucket notifications
if method in ("PUT", "DELETE") and "/" in path[1:]:
parts = path[1:].split("/", 1)
bucket_name = parts[0]
object_path = "/%s" % parts[1]
send_notifications(method, bucket_name, object_path)
# append CORS headers to response
if response:
parsed = urlparse.urlparse(path)
bucket_name = parsed.path.split("/")[0]
append_cors_headers(
bucket_name,
request_method=method,
request_headers=headers,
response=response,
)
|
def update_s3(method, path, data, headers, response=None, return_forward_info=False):
if return_forward_info:
modified_data = None
# If this request contains streaming v4 authentication signatures, strip them from the message
# Related isse: https://github.com/atlassian/localstack/issues/98
# TODO we should evaluate whether to replace moto s3 with scality/S3:
# https://github.com/scality/S3/issues/237
if headers.get("x-amz-content-sha256") == "STREAMING-AWS4-HMAC-SHA256-PAYLOAD":
modified_data = strip_chunk_signatures(data)
# persist this API call to disk
persistence.record("s3", method, path, data, headers)
parsed = urlparse.urlparse(path)
query = parsed.query
path = parsed.path
bucket = path.split("/")[1]
query_map = urlparse.parse_qs(query)
if method == "PUT" and (query == "notification" or "notification" in query_map):
tree = ET.fromstring(data)
queue_config = tree.find("{%s}QueueConfiguration" % XMLNS_S3)
if len(queue_config):
S3_NOTIFICATIONS[bucket] = {
"Id": get_xml_text(queue_config, "Id"),
"Event": get_xml_text(queue_config, "Event", ns=XMLNS_S3),
"Queue": get_xml_text(queue_config, "Queue", ns=XMLNS_S3),
"Topic": get_xml_text(queue_config, "Topic", ns=XMLNS_S3),
"CloudFunction": get_xml_text(
queue_config, "CloudFunction", ns=XMLNS_S3
),
}
if query == "cors" or "cors" in query_map:
if method == "GET":
return get_cors(bucket)
if method == "PUT":
return set_cors(bucket, data)
if method == "DELETE":
return delete_cors(bucket)
if modified_data:
return Request(data=modified_data, headers=headers)
return True
# get subscribers and send bucket notifications
if method in ("PUT", "DELETE") and "/" in path[1:]:
parts = path[1:].split("/", 1)
bucket_name = parts[0]
object_path = "/%s" % parts[1]
send_notifications(method, bucket_name, object_path)
# append CORS headers to response
if response:
parsed = urlparse.urlparse(path)
bucket_name = parsed.path.split("/")[0]
append_cors_headers(
bucket_name,
request_method=method,
request_headers=headers,
response=response,
)
|
https://github.com/localstack/localstack/issues/83
|
ERROR:localstack.mock.generic_proxy:Error forwarding request: Traceback (most recent call last):
File "/opt/code/localstack/localstack/mock/generic_proxy.py", line 88, in forward
data=data, headers=self.headers, return_forward_info=True)
File "/opt/code/localstack/localstack/mock/proxy/sns_listener.py", line 37, in update_sns
queue_url = aws_stack.get_sqs_queue_url(queue_name)
File "/opt/code/localstack/localstack/utils/aws/aws_stack.py", line 285, in get_sqs_queue_url
response = client.get_queue_url(QueueName=queue_name)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/client.py", line 253, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/client.py", line 531, in _make_api_call
operation_model, request_dict)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/endpoint.py", line 141, in make_request
return self._send_request(request_dict, operation_model)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/endpoint.py", line 168, in _send_request
request, operation_model, attempts)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/endpoint.py", line 233, in _get_response
response_dict, operation_model.output_shape)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/parsers.py", line 210, in parse
parsed = self._do_error_parse(response, shape)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/parsers.py", line 438, in _do_error_parse
root = self._parse_xml_string_to_dom(xml_contents)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/parsers.py", line 390, in _parse_xml_string_to_dom
"invalid XML received:\n%s" % (e, xml_string))
ResponseParserError: Unable to parse response (syntax error: line 1, column 54), invalid XML received:
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
<title>500 Internal Server Error</title>
<h1>Internal Server Error</h1>
<p>The server encountered an internal error and was unable to complete your request. Either the server is overloaded or there is an error in the application.</p>
|
ResponseParserError
|
def get_client(resource):
resource_type = get_resource_type(resource)
service = get_service_name(resource)
resource_config = RESOURCE_TO_FUNCTION.get(resource_type)
if resource_config is None:
raise Exception(
"CloudFormation deployment for resource type %s not yet implemented"
% resource_type
)
if ACTION_CREATE not in resource_config:
# nothing to do for this resource
return
try:
if resource_config[ACTION_CREATE].get("boto_client") == "resource":
return aws_stack.connect_to_resource(service)
return aws_stack.connect_to_service(service)
except Exception as e:
LOGGER.warning(
'Unable to get client for "%s" API, skipping deployment.' % service
)
return None
|
def get_client(resource):
resource_type = get_resource_type(resource)
service = get_service_name(resource)
if (
RESOURCE_TO_FUNCTION[resource_type][ACTION_CREATE].get("boto_client")
== "resource"
):
return aws_stack.connect_to_resource(service)
return aws_stack.connect_to_service(service)
|
https://github.com/localstack/localstack/issues/83
|
ERROR:localstack.mock.generic_proxy:Error forwarding request: Traceback (most recent call last):
File "/opt/code/localstack/localstack/mock/generic_proxy.py", line 88, in forward
data=data, headers=self.headers, return_forward_info=True)
File "/opt/code/localstack/localstack/mock/proxy/sns_listener.py", line 37, in update_sns
queue_url = aws_stack.get_sqs_queue_url(queue_name)
File "/opt/code/localstack/localstack/utils/aws/aws_stack.py", line 285, in get_sqs_queue_url
response = client.get_queue_url(QueueName=queue_name)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/client.py", line 253, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/client.py", line 531, in _make_api_call
operation_model, request_dict)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/endpoint.py", line 141, in make_request
return self._send_request(request_dict, operation_model)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/endpoint.py", line 168, in _send_request
request, operation_model, attempts)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/endpoint.py", line 233, in _get_response
response_dict, operation_model.output_shape)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/parsers.py", line 210, in parse
parsed = self._do_error_parse(response, shape)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/parsers.py", line 438, in _do_error_parse
root = self._parse_xml_string_to_dom(xml_contents)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/parsers.py", line 390, in _parse_xml_string_to_dom
"invalid XML received:\n%s" % (e, xml_string))
ResponseParserError: Unable to parse response (syntax error: line 1, column 54), invalid XML received:
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
<title>500 Internal Server Error</title>
<h1>Internal Server Error</h1>
<p>The server encountered an internal error and was unable to complete your request. Either the server is overloaded or there is an error in the application.</p>
|
ResponseParserError
|
def deploy_resource(resource_id, resources, stack_name):
resource = resources[resource_id]
client = get_client(resource)
if not client:
return False
resource_type = get_resource_type(resource)
func_details = RESOURCE_TO_FUNCTION.get(resource_type)
if not func_details:
LOGGER.warning("Resource type not yet implemented: %s" % resource["Type"])
return
func_details = func_details[ACTION_CREATE]
function = getattr(client, func_details["function"])
params = dict(func_details["parameters"])
defaults = func_details.get("defaults", {})
if "Properties" not in resource:
resource["Properties"] = {}
# print('deploying', resource_id, resource_type)
for param_key, prop_keys in iteritems(dict(params)):
params.pop(param_key, None)
if not isinstance(prop_keys, list):
prop_keys = [prop_keys]
for prop_key in prop_keys:
if prop_key == PLACEHOLDER_RESOURCE_NAME:
# obtain physical resource name from stack resources
params[param_key] = resolve_ref(
stack_name, resource_id, resources, attribute="PhysicalResourceId"
)
else:
prop_value = resource["Properties"].get(prop_key)
if prop_value is not None:
params[param_key] = prop_value
tmp_value = params.get(param_key)
if tmp_value is not None:
params[param_key] = resolve_refs_recursively(
stack_name, tmp_value, resources
)
break
# hack: convert to boolean
if params.get(param_key) in ["True", "False"]:
params[param_key] = params.get(param_key) == "True"
# assign default value if empty
params = common.merge_recursive(defaults, params)
# invoke function
try:
result = function(**params)
except Exception as e:
LOGGER.warning(
"Error calling %s with params: %s for resource: %s"
% (function, params, resource)
)
raise e
# update status
set_status_deployed(resource_id, resource, stack_name)
return result
|
def deploy_resource(resource):
client = get_client(resource)
resource_type = get_resource_type(resource)
func_details = RESOURCE_TO_FUNCTION.get(resource_type)
if not func_details:
LOGGER.warning("Resource type not yet implemented: %s" % resource["Type"])
return
func_details = func_details[ACTION_CREATE]
function = getattr(client, func_details["function"])
params = dict(func_details["parameters"])
if "Properties" not in resource:
resource["Properties"] = {}
for param_key, prop_key in iteritems(params):
params[param_key] = resource["Properties"].get(prop_key)
# invoke function
return function(**params)
|
https://github.com/localstack/localstack/issues/83
|
ERROR:localstack.mock.generic_proxy:Error forwarding request: Traceback (most recent call last):
File "/opt/code/localstack/localstack/mock/generic_proxy.py", line 88, in forward
data=data, headers=self.headers, return_forward_info=True)
File "/opt/code/localstack/localstack/mock/proxy/sns_listener.py", line 37, in update_sns
queue_url = aws_stack.get_sqs_queue_url(queue_name)
File "/opt/code/localstack/localstack/utils/aws/aws_stack.py", line 285, in get_sqs_queue_url
response = client.get_queue_url(QueueName=queue_name)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/client.py", line 253, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/client.py", line 531, in _make_api_call
operation_model, request_dict)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/endpoint.py", line 141, in make_request
return self._send_request(request_dict, operation_model)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/endpoint.py", line 168, in _send_request
request, operation_model, attempts)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/endpoint.py", line 233, in _get_response
response_dict, operation_model.output_shape)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/parsers.py", line 210, in parse
parsed = self._do_error_parse(response, shape)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/parsers.py", line 438, in _do_error_parse
root = self._parse_xml_string_to_dom(xml_contents)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/parsers.py", line 390, in _parse_xml_string_to_dom
"invalid XML received:\n%s" % (e, xml_string))
ResponseParserError: Unable to parse response (syntax error: line 1, column 54), invalid XML received:
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
<title>500 Internal Server Error</title>
<h1>Internal Server Error</h1>
<p>The server encountered an internal error and was unable to complete your request. Either the server is overloaded or there is an error in the application.</p>
|
ResponseParserError
|
def deploy_template(template, stack_name):
if isinstance(template, string_types):
template = parse_template(template)
if MARKER_DONT_REDEPLOY_STACK in template:
# If we are currently deploying, then bail. This can occur if
# deploy_template(..) method calls boto's update_stack(..) (to update the
# state of resources) which itself triggers another call to deploy_template(..).
# We don't want to end up in an infinite/recursive deployment loop.
return
resource_map = template.get("Resources")
if not resource_map:
LOGGER.warning("CloudFormation template contains no Resources section")
return
next = resource_map
# resource_list = resource_map.values()
iters = 3
for i in range(0, iters):
# print('deployment iteration', i)
# get resource details
for resource_id, resource in iteritems(next):
resource["__details__"] = describe_stack_resources(stack_name, resource_id)[
0
]
next = resources_to_deploy_next(resource_map, stack_name)
if not next:
return
for resource_id, resource in iteritems(next):
deploy_resource(resource_id, resource_map, stack_name=stack_name)
LOGGER.warning(
"Unable to resolve all dependencies and deploy all resources "
+ "after %s iterations. Remaining (%s): %s" % (iters, len(next), next)
)
|
def deploy_template(template):
if isinstance(template, string_types):
template = parse_template(template)
for key, resource in iteritems(template["Resources"]):
deploy_resource(resource)
|
https://github.com/localstack/localstack/issues/83
|
ERROR:localstack.mock.generic_proxy:Error forwarding request: Traceback (most recent call last):
File "/opt/code/localstack/localstack/mock/generic_proxy.py", line 88, in forward
data=data, headers=self.headers, return_forward_info=True)
File "/opt/code/localstack/localstack/mock/proxy/sns_listener.py", line 37, in update_sns
queue_url = aws_stack.get_sqs_queue_url(queue_name)
File "/opt/code/localstack/localstack/utils/aws/aws_stack.py", line 285, in get_sqs_queue_url
response = client.get_queue_url(QueueName=queue_name)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/client.py", line 253, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/client.py", line 531, in _make_api_call
operation_model, request_dict)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/endpoint.py", line 141, in make_request
return self._send_request(request_dict, operation_model)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/endpoint.py", line 168, in _send_request
request, operation_model, attempts)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/endpoint.py", line 233, in _get_response
response_dict, operation_model.output_shape)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/parsers.py", line 210, in parse
parsed = self._do_error_parse(response, shape)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/parsers.py", line 438, in _do_error_parse
root = self._parse_xml_string_to_dom(xml_contents)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/parsers.py", line 390, in _parse_xml_string_to_dom
"invalid XML received:\n%s" % (e, xml_string))
ResponseParserError: Unable to parse response (syntax error: line 1, column 54), invalid XML received:
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
<title>500 Internal Server Error</title>
<h1>Internal Server Error</h1>
<p>The server encountered an internal error and was unable to complete your request. Either the server is overloaded or there is an error in the application.</p>
|
ResponseParserError
|
def send_command():
"""
Run a remote command. This is called via py3-cmd utility.
We look for any uds sockets with the correct name prefix and send our
command to all that we find. This allows us to communicate with multiple
py3status instances.
"""
def verbose(msg):
"""
print output if verbose is set.
"""
if options.verbose:
print(msg)
options = command_parser()
msg = json.dumps(vars(options)).encode("utf-8")
if len(msg) > MAX_SIZE:
verbose(f"Message length too long, max length ({MAX_SIZE})")
# find all likely socket addresses
uds_list = Path(SERVER_ADDRESS).parent.glob(f"{Path(SERVER_ADDRESS).name}.[0-9]*")
verbose(f"message {msg!r}")
for uds in uds_list:
# Create a UDS socket
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
verbose(f"connecting to {uds}")
try:
sock.connect(uds.as_posix())
except OSError:
# this is a stale socket so delete it
verbose("stale socket deleting")
try:
uds.unlink()
except OSError:
pass
continue
try:
# Send data
verbose("sending")
sock.sendall(msg)
finally:
verbose("closing socket")
sock.close()
|
def send_command():
"""
Run a remote command. This is called via py3-cmd utility.
We look for any uds sockets with the correct name prefix and send our
command to all that we find. This allows us to communicate with multiple
py3status instances.
"""
def verbose(msg):
"""
print output if verbose is set.
"""
if options.verbose:
print(msg)
options = command_parser()
msg = json.dumps(vars(options)).encode("utf-8")
if len(msg) > MAX_SIZE:
verbose(f"Message length too long, max length ({MAX_SIZE})")
# find all likely socket addresses
uds_list = Path(".").glob(f"{SERVER_ADDRESS}.[0-9]*")
verbose(f"message {msg!r}")
for uds in uds_list:
# Create a UDS socket
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
verbose(f"connecting to {uds}")
try:
sock.connect(uds)
except OSError:
# this is a stale socket so delete it
verbose("stale socket deleting")
try:
uds.unlink()
except OSError:
pass
continue
try:
# Send data
verbose("sending")
sock.sendall(msg)
finally:
verbose("closing socket")
sock.close()
|
https://github.com/ultrabug/py3status/issues/2003
|
$ uname -a
Linux eolas 5.10.12-arch1-1 #1 SMP PREEMPT Sun, 31 Jan 2021 00:41:06 +0000 x86_64 GNU/Linux
$ py3-cmd --version
py3status 3.32 (python 3.9.1)
$ py3-cmd refresh --all
Traceback (most recent call last):
File "/usr/bin/py3-cmd", line 13, in <module>
sys.exit(send_command())
File "/usr/lib/python3.9/site-packages/py3status/command.py", line 534, in send_command
for uds in uds_list:
File "/usr/lib/python3.9/pathlib.py", line 1164, in glob
raise NotImplementedError("Non-relative patterns are unsupported")
NotImplementedError: Non-relative patterns are unsupported
|
NotImplementedError
|
def _add_player(self, player_id):
"""
Add player to mpris_players
"""
if not player_id.startswith(SERVICE_BUS):
return False
player = self._dbus.get(player_id, SERVICE_BUS_URL)
if player.Identity not in self._mpris_names:
self._mpris_names[player.Identity] = player_id.split(".")[-1]
for p in self._mpris_players.values():
if not p["name"] and p["identity"] in self._mpris_names:
p["name"] = self._mpris_names[p["identity"]]
p["full_name"] = "{} {}".format(p["name"], p["index"])
identity = player.Identity
name = self._mpris_names.get(identity)
if (
self.player_priority != []
and name not in self.player_priority
and "*" not in self.player_priority
):
return False
if identity not in self._mpris_name_index:
self._mpris_name_index[identity] = 0
status = player.PlaybackStatus
state_priority = WORKING_STATES.index(status)
index = self._mpris_name_index[identity]
self._mpris_name_index[identity] += 1
try:
subscription = player.PropertiesChanged.connect(self._player_monitor(player_id))
except AttributeError:
subscription = {}
self._mpris_players[player_id] = {
"_dbus_player": player,
"_id": player_id,
"_state_priority": state_priority,
"index": index,
"identity": identity,
"name": name,
"full_name": "{} {}".format(name, index),
"status": status,
"subscription": subscription,
}
return True
|
def _add_player(self, player_id):
"""
Add player to mpris_players
"""
if not player_id.startswith(SERVICE_BUS):
return False
player = self._dbus.get(player_id, SERVICE_BUS_URL)
if player.Identity not in self._mpris_names:
self._mpris_names[player.Identity] = player_id.split(".")[-1]
for p in self._mpris_players.values():
if not p["name"] and p["identity"] in self._mpris_names:
p["name"] = self._mpris_names[p["identity"]]
p["full_name"] = "{} {}".format(p["name"], p["index"])
identity = player.Identity
name = self._mpris_names.get(identity)
if (
self.player_priority != []
and name not in self.player_priority
and "*" not in self.player_priority
):
return False
if identity not in self._mpris_name_index:
self._mpris_name_index[identity] = 0
status = player.PlaybackStatus
state_priority = WORKING_STATES.index(status)
index = self._mpris_name_index[identity]
self._mpris_name_index[identity] += 1
subscription = player.PropertiesChanged.connect(self._player_monitor(player_id))
self._mpris_players[player_id] = {
"_dbus_player": player,
"_id": player_id,
"_state_priority": state_priority,
"index": index,
"identity": identity,
"name": name,
"full_name": "{} {}".format(name, index),
"status": status,
"subscription": subscription,
}
return True
|
https://github.com/ultrabug/py3status/issues/911
|
lasers~/src/py3status/py3status/modules git:(master) python3 mpris.py.
Traceback (most recent call last):
File "mpris.py", line 578, in <module>
module_test(Py3status)
File "/usr/lib/python3.6/site-packages/py3status-3.6rc1-py3.6.egg/py3status/module_test.py", line 47, in module_test
module.post_config_hook()
File "mpris.py", line 176, in post_config_hook
self._start_listener()
File "mpris.py", line 454, in _start_listener
self._get_players()
File "mpris.py", line 440, in _get_players
self._add_player(player)
File "mpris.py", line 411, in _add_player
subscription = player.PropertiesChanged.connect(self._player_monitor(player_id))
AttributeError: '<CompositeObject>' object has no attribute 'PropertiesChanged'
|
AttributeError
|
def backlight(self):
full_text = ""
if self.device_path is not None:
level = self._get_backlight_level()
full_text = self.py3.safe_format(self.format, {"level": level})
response = {
"cached_until": self.py3.time_in(self.cache_timeout),
"full_text": full_text,
}
return response
|
def backlight(self):
level = self._get_backlight_level()
full_text = self.py3.safe_format(self.format, {"level": level})
response = {
"cached_until": self.py3.time_in(self.cache_timeout),
"full_text": full_text,
}
return response
|
https://github.com/ultrabug/py3status/issues/557
|
Traceback (most recent call last):
File "battery_level.py", line 448, in <module>
module_test(Py3status)
File "/usr/lib/python3.5/site-packages/py3status/module_test.py", line 51, in module_test
print(getattr(module, method)(*method_args))
File "battery_level.py", line 174, in battery_level
self._refresh_battery_info()
File "battery_level.py", line 316, in _refresh_battery_info
battery = battery_list[self.battery_id]
IndexError: list index out of range
|
IndexError
|
def battery_level(self):
if not os.listdir(self.sys_battery_path):
return {"full_text": "", "cached_until": self.py3.time_in(self.cache_timeout)}
self._refresh_battery_info()
self._update_icon()
self._update_ascii_bar()
self._update_full_text()
return self._build_response()
|
def battery_level(self):
self._refresh_battery_info()
self._update_icon()
self._update_ascii_bar()
self._update_full_text()
return self._build_response()
|
https://github.com/ultrabug/py3status/issues/557
|
Traceback (most recent call last):
File "battery_level.py", line 448, in <module>
module_test(Py3status)
File "/usr/lib/python3.5/site-packages/py3status/module_test.py", line 51, in module_test
print(getattr(module, method)(*method_args))
File "battery_level.py", line 174, in battery_level
self._refresh_battery_info()
File "battery_level.py", line 316, in _refresh_battery_info
battery = battery_list[self.battery_id]
IndexError: list index out of range
|
IndexError
|
def _config_from_pyproject(path):
if os.path.isfile(path):
try:
with open(path, "r") as f:
pyproject = tomlkit.loads(f.read())
if pyproject:
return pyproject.get("tool", {}).get("semantic_release", {})
except TOMLKitError as e:
logger.debug(f"Could not decode pyproject.toml: {e}")
return {}
|
def _config_from_pyproject(path):
if os.path.isfile(path):
try:
with open(path, "r") as f:
pyproject = tomlkit.loads(f.read())
if pyproject:
return dict(pyproject.get("tool").get("semantic_release"))
except TOMLKitError as e:
logger.debug(f"Could not decode pyproject.toml: {e}")
return {}
|
https://github.com/relekang/python-semantic-release/issues/317
|
Run relekang/python-semantic-release@master
/usr/bin/docker run --name eb9cc679ae0e4f1f83d7f2d23985a34f_6e2f70 --label 442333 --workdir /github/workspace --rm -e INPUT_GITHUB_TOKEN -e INPUT_PYPI_TOKEN -e INPUT_DIRECTORY -e INPUT_PYPI_USERNAME -e INPUT_PYPI_PASSWORD -e HOME -e GITHUB_JOB -e GITHUB_REF -e GITHUB_SHA -e GITHUB_REPOSITORY -e GITHUB_REPOSITORY_OWNER -e GITHUB_RUN_ID -e GITHUB_RUN_NUMBER -e GITHUB_RETENTION_DAYS -e GITHUB_ACTOR -e GITHUB_WORKFLOW -e GITHUB_HEAD_REF -e GITHUB_BASE_REF -e GITHUB_EVENT_NAME -e GITHUB_SERVER_URL -e GITHUB_API_URL -e GITHUB_GRAPHQL_URL -e GITHUB_WORKSPACE -e GITHUB_ACTION -e GITHUB_EVENT_PATH -e GITHUB_ACTION_REPOSITORY -e GITHUB_ACTION_REF -e GITHUB_PATH -e GITHUB_ENV -e RUNNER_OS -e RUNNER_TOOL_CACHE -e RUNNER_TEMP -e RUNNER_WORKSPACE -e ACTIONS_RUNTIME_URL -e ACTIONS_RUNTIME_TOKEN -e ACTIONS_CACHE_URL -e GITHUB_ACTIONS=true -e CI=true -v "/var/run/docker.sock":"/var/run/docker.sock" -v "/home/runner/work/_temp/_github_home":"/github/home" -v "/home/runner/work/_temp/_github_workflow":"/github/workflow" -v "/home/runner/work/_temp/_runner_file_commands":"/github/file_commands" -v "/home/runner/work/arbie/arbie":"/github/workspace" 442333:eb9cc679ae0e4f1f83d7f2d23985a34f
Traceback (most recent call last):
File "/usr/local/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/local/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/semantic-release/semantic_release/__main__.py", line 1, in <module>
from .cli import entry
File "/semantic-release/semantic_release/cli.py", line 13, in <module>
from .changelog import markdown_changelog
File "/semantic-release/semantic_release/changelog/__init__.py", line 4, in <module>
from ..settings import config, current_changelog_components
File "/semantic-release/semantic_release/settings.py", line 77, in <module>
config = _config()
File "/semantic-release/semantic_release/settings.py", line 29, in _config
toml_config = _config_from_pyproject(toml_path)
File "/semantic-release/semantic_release/settings.py", line 70, in _config_from_pyproject
return dict(pyproject.get("tool").get("semantic_release"))
TypeError: 'NoneType' object is not iterable
|
TypeError
|
def commit_new_version(version: str):
"""
Commits the file containing the version number variable with the version number as the commit
message.
:param version: The version number to be used in the commit message.
"""
check_repo()
commit_message = config.get("semantic_release", "commit_message")
message = "{0}\n\n{1}".format(version, commit_message)
version_file = config.get("semantic_release", "version_variable").split(":")[0]
# get actual path to filename, to allow running cmd from subdir of git root
version_filepath = PurePath(os.getcwd(), version_file).relative_to(repo.working_dir)
repo.git.add(str(version_filepath))
return repo.git.commit(m=message, author="semantic-release <semantic-release>")
|
def commit_new_version(version: str):
"""
Commits the file containing the version number variable with the version number as the commit
message.
:param version: The version number to be used in the commit message.
"""
check_repo()
commit_message = config.get("semantic_release", "commit_message")
message = "{0}\n\n{1}".format(version, commit_message)
repo.git.add(config.get("semantic_release", "version_variable").split(":")[0])
return repo.git.commit(m=message, author="semantic-release <semantic-release>")
|
https://github.com/relekang/python-semantic-release/issues/66
|
venv) C:\workspace\checklive\src>semantic-release --patch publish
Traceback (most recent call last):
File "c:\python35\lib\runpy.py", line 170, in _run_module_as_main
"__main__", mod_spec)
File "c:\python35\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\workspace\checklive\venv\Scripts\semantic-release.exe\__main__.py", line 5, in <module>
File "c:\workspace\checklive\venv\lib\site-packages\semantic_release\cli.py", line 9, in <module>
from .history import (evaluate_version_bump, get_current_version, get_new_version,
File "c:\workspace\checklive\venv\lib\site-packages\semantic_release\history\__init__.py", line 6, in <module>
from ..vcs_helpers import get_commit_log, get_last_version
File "c:\workspace\checklive\venv\lib\site-packages\semantic_release\vcs_helpers.py", line 8, in <module>
repo = Repo('.git', search_parent_directories=True)
File "c:\workspace\checklive\venv\lib\site-packages\git\repo\base.py", line 135, in __init__
raise NoSuchPathError(epath)
git.exc.NoSuchPathError: C:\workspace\checklive\src\.git
(venv) C:\workspace\checklive\src>
|
git.exc.NoSuchPathError
|
def __getattr__(self, key):
# TODO keep this for a version, in order to not break old code
# this entire method (as well as the _dict attribute in __slots__ and the __setattr__ method)
# can be removed in 4.0
# this method is only called if the attribute was not found elsewhere, like in __slots_
if key not in self.__slots__:
raise AttributeError
try:
warnings.warn(
"Custom attributes of messages are deprecated and will be removed in 4.0",
DeprecationWarning,
)
return self._dict[key]
except KeyError:
raise AttributeError("'message' object has no attribute '{}'".format(key))
|
def __getattr__(self, key):
# TODO keep this for a version, in order to not break old code
# this entire method (as well as the _dict attribute in __slots__ and the __setattr__ method)
# can be removed in 4.0
# this method is only called if the attribute was not found elsewhere, like in __slots__
try:
warnings.warn(
"Custom attributes of messages are deprecated and will be removed in 4.0",
DeprecationWarning,
)
return self._dict[key]
except KeyError:
raise AttributeError("'message' object has no attribute '{}'".format(key))
|
https://github.com/hardbyte/python-can/issues/804
|
/usr/bin/python3.7 /XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/SerializationTest.py
2020-04-02 13:35:59,352:MainThread:INFO: Created a socket
2020-04-02 13:35:59,389:MainThread:INFO: pickled
Traceback (most recent call last):
File "/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/SerializationTest.py", line 39, in <module>
canReader.run()
File "/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/SerializationTest.py", line 29, in run
unpick = pickle.loads(pick)
File "/home/XXXXXX/.local/lib/python3.7/site-packages/can/message.py", line 59, in __getattr__
return self._dict[key]
File "/home/XXXXXX/.local/lib/python3.7/site-packages/can/message.py", line 59, in __getattr__
return self._dict[key]
File "/home/XXXXXX/.local/lib/python3.7/site-packages/can/message.py", line 59, in __getattr__
return self._dict[key]
[Previous line repeated 494 more times]
File "/home/XXXXXX/.local/lib/python3.7/site-packages/can/message.py", line 58, in __getattr__
warnings.warn("Custom attributes of messages are deprecated and will be removed in 4.0", DeprecationWarning)
RecursionError: maximum recursion depth exceeded while calling a Python object
Process finished with exit code 1
|
RecursionError
|
def build_bcm_header(
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
can_id,
nframes,
):
result = BcmMsgHead(
opcode=opcode,
flags=flags,
count=count,
ival1_tv_sec=ival1_seconds,
ival1_tv_usec=ival1_usec,
ival2_tv_sec=ival2_seconds,
ival2_tv_usec=ival2_usec,
can_id=can_id,
nframes=nframes,
)
return ctypes.string_at(ctypes.addressof(result), ctypes.sizeof(result))
|
def build_bcm_header(
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
can_id,
nframes,
):
# == Must use native not standard types for packing ==
# struct bcm_msg_head {
# __u32 opcode; -> I
# __u32 flags; -> I
# __u32 count; -> I
# struct timeval ival1, ival2; -> llll ...
# canid_t can_id; -> I
# __u32 nframes; -> I
bcm_cmd_msg_fmt = "@3I4l2I0q"
return struct.pack(
bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
can_id,
nframes,
)
|
https://github.com/hardbyte/python-can/issues/470
|
Traceback (most recent call last):
File "./test.py", line 8, in <module>
bus.send_periodic(msg, 0.01, 9)
File "/usr/local/lib/python3.5/dist-packages/can/bus.py", line 201, in send_periodic
task = self._send_periodic_internal(msg, period, duration)
File "/usr/local/lib/python3.5/dist-packages/can/interfaces/socketcan/socketcan.py", line 601, in _send_periodic_internal
task = CyclicSendTask(bcm_socket, msg, period, duration)
File "/usr/local/lib/python3.5/dist-packages/can/interfaces/socketcan/socketcan.py", line 254, in __init__
self._tx_setup(message)
File "/usr/local/lib/python3.5/dist-packages/can/interfaces/socketcan/socketcan.py", line 274, in _tx_setup
send_bcm(self.bcm_socket, header + frame)
File "/usr/local/lib/python3.5/dist-packages/can/interfaces/socketcan/socketcan.py", line 206, in send_bcm
raise can.CanError(base + "You are probably referring to a non-existing frame.")
can.CanError: Couldn't send CAN BCM frame. OS Error 22: Invalid argument
You are probably referring to a non-existing frame.
|
can.CanError
|
def __init__(self, channel, ttyBaudrate=115200, timeout=1, bitrate=None, **kwargs):
"""
:param str channel:
port of underlying serial or usb device (e.g. /dev/ttyUSB0, COM8, ...)
Must not be empty.
:param int ttyBaudrate:
baudrate of underlying serial or usb device
:param int bitrate:
Bitrate in bit/s
:param float poll_interval:
Poll interval in seconds when reading messages
:param float timeout:
timeout in seconds when reading message
"""
if not channel: # if None or empty
raise TypeError("Must specify a serial port.")
if "@" in channel:
(channel, ttyBaudrate) = channel.split("@")
self.serialPortOrig = serial.Serial(channel, baudrate=ttyBaudrate, timeout=timeout)
time.sleep(self._SLEEP_AFTER_SERIAL_OPEN)
if bitrate is not None:
self.close()
if bitrate in self._BITRATES:
self.write(self._BITRATES[bitrate])
else:
raise ValueError(
"Invalid bitrate, choose one of " + (", ".join(self._BITRATES)) + "."
)
self.open()
super(slcanBus, self).__init__(
channel, ttyBaudrate=115200, timeout=1, bitrate=None, **kwargs
)
|
def __init__(self, channel, ttyBaudrate=115200, timeout=1, bitrate=None, **kwargs):
"""
:param str channel:
port of underlying serial or usb device (e.g. /dev/ttyUSB0, COM8, ...)
Must not be empty.
:param int ttyBaudrate:
baudrate of underlying serial or usb device
:param int bitrate:
Bitrate in bit/s
:param float poll_interval:
Poll interval in seconds when reading messages
:param float timeout:
timeout in seconds when reading message
"""
if not channel: # if None or empty
raise TypeError("Must specify a serial port.")
if "@" in channel:
(channel, ttyBaudrate) = channel.split("@")
self.serialPortOrig = serial.Serial(channel, baudrate=ttyBaudrate, timeout=timeout)
self.serialPort = io.TextIOWrapper(
io.BufferedRWPair(self.serialPortOrig, self.serialPortOrig, 1),
newline="\r",
line_buffering=True,
)
time.sleep(self._SLEEP_AFTER_SERIAL_OPEN)
if bitrate is not None:
self.close()
if bitrate in self._BITRATES:
self.write(self._BITRATES[bitrate])
else:
raise ValueError(
"Invalid bitrate, choose one of " + (", ".join(self._BITRATES)) + "."
)
self.open()
super(slcanBus, self).__init__(
channel, ttyBaudrate=115200, timeout=1, bitrate=None, **kwargs
)
|
https://github.com/hardbyte/python-can/issues/382
|
DEBUG:can:loaded can config: {'interface': 'slcan', 'channel': '/dev/ttyUSB0@115200'}
Traceback (most recent call last):
File "encoder-test.py", line 19, in <module>
network.connect(channel='/dev/ttyUSB0@115200', bustype='slcan')
File "/usr/local/lib/python3.5/dist-packages/canopen/network.py", line 100, in connect
self.bus = can.interface.Bus(*args, **kwargs)
File "/usr/local/lib/python3.5/dist-packages/can/interface.py", line 120, in __new__
return cls(channel, *args, **config)
File "/usr/local/lib/python3.5/dist-packages/can/interfaces/slcan.py", line 81, in __init__
self.open()
File "/usr/local/lib/python3.5/dist-packages/can/interfaces/slcan.py", line 93, in open
self.write('O')
File "/usr/local/lib/python3.5/dist-packages/can/interfaces/slcan.py", line 89, in write
self.serialPort.write(string.decode())
AttributeError: 'str' object has no attribute 'decode'
|
AttributeError
|
def write(self, string):
if not string.endswith("\r"):
string += "\r"
self.serialPortOrig.write(string.encode())
self.serialPortOrig.flush()
|
def write(self, string):
if not string.endswith("\r"):
string += "\r"
self.serialPort.write(string.decode())
self.serialPort.flush()
|
https://github.com/hardbyte/python-can/issues/382
|
DEBUG:can:loaded can config: {'interface': 'slcan', 'channel': '/dev/ttyUSB0@115200'}
Traceback (most recent call last):
File "encoder-test.py", line 19, in <module>
network.connect(channel='/dev/ttyUSB0@115200', bustype='slcan')
File "/usr/local/lib/python3.5/dist-packages/canopen/network.py", line 100, in connect
self.bus = can.interface.Bus(*args, **kwargs)
File "/usr/local/lib/python3.5/dist-packages/can/interface.py", line 120, in __new__
return cls(channel, *args, **config)
File "/usr/local/lib/python3.5/dist-packages/can/interfaces/slcan.py", line 81, in __init__
self.open()
File "/usr/local/lib/python3.5/dist-packages/can/interfaces/slcan.py", line 93, in open
self.write('O')
File "/usr/local/lib/python3.5/dist-packages/can/interfaces/slcan.py", line 89, in write
self.serialPort.write(string.decode())
AttributeError: 'str' object has no attribute 'decode'
|
AttributeError
|
def _recv_internal(self, timeout):
if timeout is not None:
self.serialPortOrig.timeout = timeout
canId = None
remote = False
extended = False
frame = []
readStr = self.serialPortOrig.read_until(b"\r")
if not readStr:
return None, False
else:
readStr = readStr.decode()
if readStr[0] == "T":
# extended frame
canId = int(readStr[1:9], 16)
dlc = int(readStr[9])
extended = True
for i in range(0, dlc):
frame.append(int(readStr[10 + i * 2 : 12 + i * 2], 16))
elif readStr[0] == "t":
# normal frame
canId = int(readStr[1:4], 16)
dlc = int(readStr[4])
for i in range(0, dlc):
frame.append(int(readStr[5 + i * 2 : 7 + i * 2], 16))
elif readStr[0] == "r":
# remote frame
canId = int(readStr[1:4], 16)
remote = True
elif readStr[0] == "R":
# remote extended frame
canId = int(readStr[1:9], 16)
extended = True
remote = True
if canId is not None:
msg = Message(
arbitration_id=canId,
extended_id=extended,
timestamp=time.time(), # Better than nothing...
is_remote_frame=remote,
dlc=dlc,
data=frame,
)
return msg, False
else:
return None, False
|
def _recv_internal(self, timeout):
if timeout is not None:
self.serialPortOrig.timeout = timeout
canId = None
remote = False
extended = False
frame = []
readStr = self.serialPort.readline()
if not readStr:
return None, False
else:
if readStr[0] == "T":
# extended frame
canId = int(readStr[1:9], 16)
dlc = int(readStr[9])
extended = True
for i in range(0, dlc):
frame.append(int(readStr[10 + i * 2 : 12 + i * 2], 16))
elif readStr[0] == "t":
# normal frame
canId = int(readStr[1:4], 16)
dlc = int(readStr[4])
for i in range(0, dlc):
frame.append(int(readStr[5 + i * 2 : 7 + i * 2], 16))
elif readStr[0] == "r":
# remote frame
canId = int(readStr[1:4], 16)
remote = True
elif readStr[0] == "R":
# remote extended frame
canId = int(readStr[1:9], 16)
extended = True
remote = True
if canId is not None:
msg = Message(
arbitration_id=canId,
extended_id=extended,
timestamp=time.time(), # Better than nothing...
is_remote_frame=remote,
dlc=dlc,
data=frame,
)
return msg, False
else:
return None, False
|
https://github.com/hardbyte/python-can/issues/382
|
DEBUG:can:loaded can config: {'interface': 'slcan', 'channel': '/dev/ttyUSB0@115200'}
Traceback (most recent call last):
File "encoder-test.py", line 19, in <module>
network.connect(channel='/dev/ttyUSB0@115200', bustype='slcan')
File "/usr/local/lib/python3.5/dist-packages/canopen/network.py", line 100, in connect
self.bus = can.interface.Bus(*args, **kwargs)
File "/usr/local/lib/python3.5/dist-packages/can/interface.py", line 120, in __new__
return cls(channel, *args, **config)
File "/usr/local/lib/python3.5/dist-packages/can/interfaces/slcan.py", line 81, in __init__
self.open()
File "/usr/local/lib/python3.5/dist-packages/can/interfaces/slcan.py", line 93, in open
self.write('O')
File "/usr/local/lib/python3.5/dist-packages/can/interfaces/slcan.py", line 89, in write
self.serialPort.write(string.decode())
AttributeError: 'str' object has no attribute 'decode'
|
AttributeError
|
def __init__(self, filename):
self.fp = open(filename, "rb")
data = self.fp.read(FILE_HEADER_STRUCT.size)
header = FILE_HEADER_STRUCT.unpack(data)
# print(header)
if header[0] != b"LOGG":
raise BLFParseError("Unexpected file format")
self.file_size = header[10]
self.uncompressed_size = header[11]
self.object_count = header[12]
self.start_timestamp = systemtime_to_timestamp(header[14:22])
self.stop_timestamp = systemtime_to_timestamp(header[22:30])
# Read rest of header
self.fp.read(header[1] - FILE_HEADER_STRUCT.size)
|
def __init__(self, filename):
self.fp = open(filename, "rb")
data = self.fp.read(FILE_HEADER_STRUCT.size)
header = FILE_HEADER_STRUCT.unpack(data)
# print(header)
assert header[0] == b"LOGG", "Unknown file format"
self.file_size = header[10]
self.uncompressed_size = header[11]
self.object_count = header[12]
self.start_timestamp = systemtime_to_timestamp(header[14:22])
self.stop_timestamp = systemtime_to_timestamp(header[22:30])
|
https://github.com/hardbyte/python-can/issues/314
|
Traceback (most recent call last):
File "D:\eclipse-workspace\blf2csv\pythoncan_blf2csv.py", line 7, in <module>
logging = list(logging)
File "C:\Users\my_user_name\AppData\Local\Python\Python36\lib\site-packages\can\io\blf.py", line 129, in __iter__
assert header[0] == b"LOBJ", "Parse error"
AssertionError: Parse error
|
AssertionError
|
def __iter__(self):
tail = b""
while True:
data = self.fp.read(OBJ_HEADER_BASE_STRUCT.size)
if not data:
# EOF
break
header = OBJ_HEADER_BASE_STRUCT.unpack(data)
# print(header)
if header[0] != b"LOBJ":
raise BLFParseError()
obj_type = header[4]
obj_data_size = header[3] - OBJ_HEADER_BASE_STRUCT.size
obj_data = self.fp.read(obj_data_size)
# Read padding bytes
self.fp.read(obj_data_size % 4)
if obj_type == LOG_CONTAINER:
method, uncompressed_size = LOG_CONTAINER_STRUCT.unpack_from(obj_data)
container_data = obj_data[LOG_CONTAINER_STRUCT.size :]
if method == NO_COMPRESSION:
data = container_data
elif method == ZLIB_DEFLATE:
data = zlib.decompress(container_data, 15, uncompressed_size)
else:
# Unknown compression method
LOG.warning("Unknown compression method (%d)", method)
continue
if tail:
data = tail + data
pos = 0
while pos + OBJ_HEADER_BASE_STRUCT.size < len(data):
header = OBJ_HEADER_BASE_STRUCT.unpack_from(data, pos)
# print(header)
if header[0] != b"LOBJ":
raise BLFParseError()
obj_size = header[3]
# Calculate position of next object
next_pos = pos + obj_size + (obj_size % 4)
if next_pos > len(data):
# Object continues in next log container
break
pos += OBJ_HEADER_BASE_STRUCT.size
# Read rest of header
header_version = header[2]
if header_version == 1:
flags, _, _, timestamp = OBJ_HEADER_V1_STRUCT.unpack_from(data, pos)
pos += OBJ_HEADER_V1_STRUCT.size
elif header_version == 2:
flags, _, _, timestamp, _ = OBJ_HEADER_V2_STRUCT.unpack_from(
data, pos
)
pos += OBJ_HEADER_V2_STRUCT.size
else:
# Unknown header version
LOG.warning("Unknown object header version (%d)", header_version)
pos = next_pos
continue
if flags == TIME_TEN_MICS:
factor = 10 * 1e-6
else:
factor = 1e-9
timestamp = timestamp * factor + self.start_timestamp
obj_type = header[4]
# Both CAN message types have the same starting content
if obj_type in (CAN_MESSAGE, CAN_MESSAGE2):
(channel, flags, dlc, can_id, can_data) = (
CAN_MSG_STRUCT.unpack_from(data, pos)
)
msg = Message(
timestamp=timestamp,
arbitration_id=can_id & 0x1FFFFFFF,
extended_id=bool(can_id & CAN_MSG_EXT),
is_remote_frame=bool(flags & REMOTE_FLAG),
dlc=dlc,
data=can_data[:dlc],
channel=channel - 1,
)
yield msg
elif obj_type == CAN_FD_MESSAGE:
(channel, flags, dlc, can_id, _, _, fd_flags, _, can_data) = (
CAN_FD_MSG_STRUCT.unpack_from(data, pos)
)
length = dlc2len(dlc)
msg = Message(
timestamp=timestamp,
arbitration_id=can_id & 0x1FFFFFFF,
extended_id=bool(can_id & CAN_MSG_EXT),
is_remote_frame=bool(flags & REMOTE_FLAG),
is_fd=bool(fd_flags & EDL),
bitrate_switch=bool(fd_flags & BRS),
error_state_indicator=bool(fd_flags & ESI),
dlc=length,
data=can_data[:length],
channel=channel - 1,
)
yield msg
elif obj_type == CAN_ERROR_EXT:
(channel, _, _, _, _, dlc, _, can_id, _, can_data) = (
CAN_ERROR_EXT_STRUCT.unpack_from(data, pos)
)
msg = Message(
timestamp=timestamp,
is_error_frame=True,
extended_id=bool(can_id & CAN_MSG_EXT),
arbitration_id=can_id & 0x1FFFFFFF,
dlc=dlc,
data=can_data[:dlc],
channel=channel - 1,
)
yield msg
pos = next_pos
# Save remaing data that could not be processed
tail = data[pos:]
self.fp.close()
|
def __iter__(self):
tail = b""
while True:
data = self.fp.read(OBJ_HEADER_BASE_STRUCT.size)
if not data:
# EOF
break
header = OBJ_HEADER_BASE_STRUCT.unpack(data)
# print(header)
assert header[0] == b"LOBJ", "Parse error"
obj_type = header[4]
obj_data_size = header[3] - OBJ_HEADER_BASE_STRUCT.size
obj_data = self.fp.read(obj_data_size)
# Read padding bytes
self.fp.read(obj_data_size % 4)
if obj_type == LOG_CONTAINER:
method, uncompressed_size = LOG_CONTAINER_STRUCT.unpack_from(obj_data)
container_data = obj_data[LOG_CONTAINER_STRUCT.size :]
if method == NO_COMPRESSION:
data = container_data
elif method == ZLIB_DEFLATE:
data = zlib.decompress(container_data, 15, uncompressed_size)
else:
# Unknown compression method
continue
if tail:
data = tail + data
pos = 0
while pos + OBJ_HEADER_BASE_STRUCT.size < len(data):
header = OBJ_HEADER_BASE_STRUCT.unpack_from(data, pos)
# print(header)
assert header[0] == b"LOBJ", "Parse error"
obj_size = header[3]
if pos + obj_size > len(data):
# Object continues in next log container
break
pos += OBJ_HEADER_BASE_STRUCT.size
# Read rest of header
header += OBJ_HEADER_STRUCT.unpack_from(data, pos)
pos += OBJ_HEADER_STRUCT.size
obj_type = header[4]
timestamp = header[8] * 1e-9 + self.start_timestamp
if obj_type == CAN_MESSAGE:
(channel, flags, dlc, can_id, can_data) = (
CAN_MSG_STRUCT.unpack_from(data, pos)
)
msg = Message(
timestamp=timestamp,
arbitration_id=can_id & 0x1FFFFFFF,
extended_id=bool(can_id & CAN_MSG_EXT),
is_remote_frame=bool(flags & REMOTE_FLAG),
dlc=dlc,
data=can_data[:dlc],
channel=channel - 1,
)
yield msg
elif obj_type == CAN_FD_MESSAGE:
(channel, flags, dlc, can_id, _, _, fd_flags, _, can_data) = (
CAN_FD_MSG_STRUCT.unpack_from(data, pos)
)
length = dlc2len(dlc)
msg = Message(
timestamp=timestamp,
arbitration_id=can_id & 0x1FFFFFFF,
extended_id=bool(can_id & CAN_MSG_EXT),
is_remote_frame=bool(flags & REMOTE_FLAG),
is_fd=bool(fd_flags & EDL),
bitrate_switch=bool(fd_flags & BRS),
error_state_indicator=bool(fd_flags & ESI),
dlc=length,
data=can_data[:length],
channel=channel - 1,
)
yield msg
elif obj_type == CAN_ERROR_EXT:
(channel, _, _, _, _, dlc, _, can_id, _, can_data) = (
CAN_ERROR_EXT_STRUCT.unpack_from(data, pos)
)
msg = Message(
timestamp=timestamp,
is_error_frame=True,
extended_id=bool(can_id & CAN_MSG_EXT),
arbitration_id=can_id & 0x1FFFFFFF,
dlc=dlc,
data=can_data[:dlc],
channel=channel - 1,
)
yield msg
pos += obj_size - HEADER_SIZE
# Add padding bytes
pos += obj_size % 4
# Save remaing data that could not be processed
tail = data[pos:]
self.fp.close()
|
https://github.com/hardbyte/python-can/issues/314
|
Traceback (most recent call last):
File "D:\eclipse-workspace\blf2csv\pythoncan_blf2csv.py", line 7, in <module>
logging = list(logging)
File "C:\Users\my_user_name\AppData\Local\Python\Python36\lib\site-packages\can\io\blf.py", line 129, in __iter__
assert header[0] == b"LOBJ", "Parse error"
AssertionError: Parse error
|
AssertionError
|
def __init__(self, filename, channel=1):
self.fp = open(filename, "wb")
self.channel = channel
# Header will be written after log is done
self.fp.write(b"\x00" * FILE_HEADER_SIZE)
self.cache = []
self.cache_size = 0
self.count_of_objects = 0
self.uncompressed_size = FILE_HEADER_SIZE
self.start_timestamp = None
self.stop_timestamp = None
|
def __init__(self, filename, channel=1):
self.fp = open(filename, "wb")
self.channel = channel
# Header will be written after log is done
self.fp.write(b"\x00" * FILE_HEADER_STRUCT.size)
self.cache = []
self.cache_size = 0
self.count_of_objects = 0
self.uncompressed_size = FILE_HEADER_STRUCT.size
self.start_timestamp = None
self.stop_timestamp = None
|
https://github.com/hardbyte/python-can/issues/314
|
Traceback (most recent call last):
File "D:\eclipse-workspace\blf2csv\pythoncan_blf2csv.py", line 7, in <module>
logging = list(logging)
File "C:\Users\my_user_name\AppData\Local\Python\Python36\lib\site-packages\can\io\blf.py", line 129, in __iter__
assert header[0] == b"LOBJ", "Parse error"
AssertionError: Parse error
|
AssertionError
|
def _add_object(self, obj_type, data, timestamp=None):
if timestamp is None:
timestamp = self.stop_timestamp or time.time()
if self.start_timestamp is None:
self.start_timestamp = timestamp
self.stop_timestamp = timestamp
timestamp = int((timestamp - self.start_timestamp) * 1e9)
header_size = OBJ_HEADER_BASE_STRUCT.size + OBJ_HEADER_V1_STRUCT.size
obj_size = header_size + len(data)
base_header = OBJ_HEADER_BASE_STRUCT.pack(
b"LOBJ", header_size, 1, obj_size, obj_type
)
obj_header = OBJ_HEADER_V1_STRUCT.pack(TIME_ONE_NANS, 0, 0, max(timestamp, 0))
self.cache.append(base_header)
self.cache.append(obj_header)
self.cache.append(data)
padding_size = len(data) % 4
if padding_size:
self.cache.append(b"\x00" * padding_size)
self.cache_size += obj_size + padding_size
self.count_of_objects += 1
if self.cache_size >= self.MAX_CACHE_SIZE:
self._flush()
|
def _add_object(self, obj_type, data, timestamp=None):
if timestamp is None:
timestamp = self.stop_timestamp or time.time()
if self.start_timestamp is None:
self.start_timestamp = timestamp
self.stop_timestamp = timestamp
timestamp = int((timestamp - self.start_timestamp) * 1e9)
obj_size = HEADER_SIZE + len(data)
base_header = OBJ_HEADER_BASE_STRUCT.pack(
b"LOBJ", HEADER_SIZE, 1, obj_size, obj_type
)
obj_header = OBJ_HEADER_STRUCT.pack(2, 0, 0, max(timestamp, 0))
self.cache.append(base_header)
self.cache.append(obj_header)
self.cache.append(data)
padding_size = len(data) % 4
if padding_size:
self.cache.append(b"\x00" * padding_size)
self.cache_size += obj_size + padding_size
self.count_of_objects += 1
if self.cache_size >= self.MAX_CACHE_SIZE:
self._flush()
|
https://github.com/hardbyte/python-can/issues/314
|
Traceback (most recent call last):
File "D:\eclipse-workspace\blf2csv\pythoncan_blf2csv.py", line 7, in <module>
logging = list(logging)
File "C:\Users\my_user_name\AppData\Local\Python\Python36\lib\site-packages\can\io\blf.py", line 129, in __iter__
assert header[0] == b"LOBJ", "Parse error"
AssertionError: Parse error
|
AssertionError
|
def _flush(self):
"""Compresses and writes data in the cache to file."""
if self.fp.closed:
return
cache = b"".join(self.cache)
if not cache:
# Nothing to write
return
uncompressed_data = cache[: self.MAX_CACHE_SIZE]
# Save data that comes after max size to next round
tail = cache[self.MAX_CACHE_SIZE :]
self.cache = [tail]
self.cache_size = len(tail)
compressed_data = zlib.compress(uncompressed_data, self.COMPRESSION_LEVEL)
obj_size = (
OBJ_HEADER_V1_STRUCT.size + LOG_CONTAINER_STRUCT.size + len(compressed_data)
)
base_header = OBJ_HEADER_BASE_STRUCT.pack(
b"LOBJ", OBJ_HEADER_BASE_STRUCT.size, 1, obj_size, LOG_CONTAINER
)
container_header = LOG_CONTAINER_STRUCT.pack(ZLIB_DEFLATE, len(uncompressed_data))
self.fp.write(base_header)
self.fp.write(container_header)
self.fp.write(compressed_data)
# Write padding bytes
self.fp.write(b"\x00" * (obj_size % 4))
self.uncompressed_size += OBJ_HEADER_V1_STRUCT.size + LOG_CONTAINER_STRUCT.size
self.uncompressed_size += len(uncompressed_data)
|
def _flush(self):
"""Compresses and writes data in the cache to file."""
if self.fp.closed:
return
cache = b"".join(self.cache)
if not cache:
# Nothing to write
return
uncompressed_data = cache[: self.MAX_CACHE_SIZE]
# Save data that comes after max size to next round
tail = cache[self.MAX_CACHE_SIZE :]
self.cache = [tail]
self.cache_size = len(tail)
compressed_data = zlib.compress(uncompressed_data, self.COMPRESSION_LEVEL)
obj_size = OBJ_HEADER_STRUCT.size + LOG_CONTAINER_STRUCT.size + len(compressed_data)
base_header = OBJ_HEADER_BASE_STRUCT.pack(
b"LOBJ", OBJ_HEADER_BASE_STRUCT.size, 1, obj_size, LOG_CONTAINER
)
container_header = LOG_CONTAINER_STRUCT.pack(ZLIB_DEFLATE, len(uncompressed_data))
self.fp.write(base_header)
self.fp.write(container_header)
self.fp.write(compressed_data)
# Write padding bytes
self.fp.write(b"\x00" * (obj_size % 4))
self.uncompressed_size += OBJ_HEADER_STRUCT.size + LOG_CONTAINER_STRUCT.size
self.uncompressed_size += len(uncompressed_data)
|
https://github.com/hardbyte/python-can/issues/314
|
Traceback (most recent call last):
File "D:\eclipse-workspace\blf2csv\pythoncan_blf2csv.py", line 7, in <module>
logging = list(logging)
File "C:\Users\my_user_name\AppData\Local\Python\Python36\lib\site-packages\can\io\blf.py", line 129, in __iter__
assert header[0] == b"LOBJ", "Parse error"
AssertionError: Parse error
|
AssertionError
|
def stop(self):
"""Stops logging and closes the file."""
if self.fp.closed:
return
self._flush()
filesize = self.fp.tell()
self.fp.close()
# Write header in the beginning of the file
header = [b"LOGG", FILE_HEADER_SIZE, APPLICATION_ID, 0, 0, 0, 2, 6, 8, 1]
# The meaning of "count of objects read" is unknown
header.extend([filesize, self.uncompressed_size, self.count_of_objects, 0])
header.extend(timestamp_to_systemtime(self.start_timestamp))
header.extend(timestamp_to_systemtime(self.stop_timestamp))
with open(self.fp.name, "r+b") as f:
f.write(FILE_HEADER_STRUCT.pack(*header))
|
def stop(self):
"""Stops logging and closes the file."""
if self.fp.closed:
return
self._flush()
filesize = self.fp.tell()
self.fp.close()
# Write header in the beginning of the file
header = [b"LOGG", FILE_HEADER_STRUCT.size, APPLICATION_ID, 0, 0, 0, 2, 6, 8, 1]
# The meaning of "count of objects read" is unknown
header.extend([filesize, self.uncompressed_size, self.count_of_objects, 0])
header.extend(timestamp_to_systemtime(self.start_timestamp))
header.extend(timestamp_to_systemtime(self.stop_timestamp))
with open(self.fp.name, "r+b") as f:
f.write(FILE_HEADER_STRUCT.pack(*header))
|
https://github.com/hardbyte/python-can/issues/314
|
Traceback (most recent call last):
File "D:\eclipse-workspace\blf2csv\pythoncan_blf2csv.py", line 7, in <module>
logging = list(logging)
File "C:\Users\my_user_name\AppData\Local\Python\Python36\lib\site-packages\can\io\blf.py", line 129, in __iter__
assert header[0] == b"LOBJ", "Parse error"
AssertionError: Parse error
|
AssertionError
|
def set_filters(self, can_filters=None):
"""Apply filtering to all messages received by this Bus.
Calling without passing any filters will reset the applied filters.
Since Kvaser only supports setting one filter per handle, the filtering
will be disabled if more than one filter is requested.
:param list can_filters:
A list of dictionaries each containing a "can_id", "can_mask" and
"extended".
>>> [{"can_id": 0x11, "can_mask": 0x21, "extended": False}]
A filter matches, when ``<received_can_id> & can_mask == can_id & can_mask``
"""
if can_filters and len(can_filters) == 1:
can_id = can_filters[0]["can_id"]
can_mask = can_filters[0]["can_mask"]
extended = 1 if can_filters[0].get("extended") else 0
try:
for handle in (self._read_handle, self._write_handle):
canSetAcceptanceFilter(handle, can_id, can_mask, extended)
except NotImplementedError:
log.warning("Filtering is not supported")
else:
log.info("canlib is filtering on ID 0x%X, mask 0x%X", can_id, can_mask)
else:
log.info("Hardware filtering has been disabled")
try:
for handle in (self._read_handle, self._write_handle):
for extended in (0, 1):
canSetAcceptanceFilter(handle, 0, 0, extended)
except NotImplementedError:
pass
|
def set_filters(self, can_filters=None):
"""Apply filtering to all messages received by this Bus.
Calling without passing any filters will reset the applied filters.
Since Kvaser only supports setting one filter per handle, the filtering
will be done in the :meth:`recv` if more than one filter is requested.
:param list can_filters:
A list of dictionaries each containing a "can_id" and a "can_mask".
>>> [{"can_id": 0x11, "can_mask": 0x21}]
A filter matches, when ``<received_can_id> & can_mask == can_id & can_mask``
"""
if can_filters and len(can_filters) == 1:
can_id = can_filters[0]["can_id"]
can_mask = can_filters[0]["can_mask"]
extended = 1 if can_filters[0].get("extended") else 0
log.info("canlib is filtering on ID 0x%X, mask 0x%X", can_id, can_mask)
for handle in (self._read_handle, self._write_handle):
canSetAcceptanceFilter(handle, can_id, can_mask, extended)
else:
log.info("Hardware filtering has been disabled")
for handle in (self._read_handle, self._write_handle):
for extended in (0, 1):
canSetAcceptanceFilter(handle, 0, 0, extended)
|
https://github.com/hardbyte/python-can/issues/154
|
Traceback (most recent call last):
File "pycan-test.py", line 3, in <module>
interface = KvaserBus(0)
File "/usr/local/lib/python2.7/dist-packages/can/interfaces/kvaser/canlib.py", line 361, in __init__
self.set_filters(can_filters)
File "/usr/local/lib/python2.7/dist-packages/can/interfaces/kvaser/canlib.py", line 413, in set_filters
canSetAcceptanceFilter(handle, can_id, can_mask, ext)
File "/usr/local/lib/python2.7/dist-packages/can/interfaces/kvaser/canlib.py", line 39, in _unimplemented_function
raise NotImplementedError('This function is not implemented in canlib')
NotImplementedError: This function is not implemented in canlib
|
NotImplementedError
|
def set_filters(self, can_filters=None):
"""Apply filtering to all messages received by this Bus.
Calling without passing any filters will reset the applied filters.
Since Kvaser only supports setting one filter per handle, the filtering
will be disabled if more than one filter is requested.
:param list can_filters:
A list of dictionaries each containing a "can_id", "can_mask" and
"extended".
>>> [{"can_id": 0x11, "can_mask": 0x21, "extended": False}]
A filter matches, when ``<received_can_id> & can_mask == can_id & can_mask``
"""
if can_filters and len(can_filters) == 1:
can_id = can_filters[0]["can_id"]
can_mask = can_filters[0]["can_mask"]
extended = 1 if can_filters[0].get("extended") else 0
try:
for handle in (self._read_handle, self._write_handle):
canSetAcceptanceFilter(handle, can_id, can_mask, extended)
except (NotImplementedError, CANLIBError) as e:
log.error("Filtering is not supported - %s", e)
else:
log.info("canlib is filtering on ID 0x%X, mask 0x%X", can_id, can_mask)
else:
log.info("Hardware filtering has been disabled")
try:
for handle in (self._read_handle, self._write_handle):
for extended in (0, 1):
canSetAcceptanceFilter(handle, 0, 0, extended)
except (NotImplementedError, CANLIBError):
pass
|
def set_filters(self, can_filters=None):
"""Apply filtering to all messages received by this Bus.
Calling without passing any filters will reset the applied filters.
Since Kvaser only supports setting one filter per handle, the filtering
will be disabled if more than one filter is requested.
:param list can_filters:
A list of dictionaries each containing a "can_id", "can_mask" and
"extended".
>>> [{"can_id": 0x11, "can_mask": 0x21, "extended": False}]
A filter matches, when ``<received_can_id> & can_mask == can_id & can_mask``
"""
if can_filters and len(can_filters) == 1:
can_id = can_filters[0]["can_id"]
can_mask = can_filters[0]["can_mask"]
extended = 1 if can_filters[0].get("extended") else 0
try:
for handle in (self._read_handle, self._write_handle):
canSetAcceptanceFilter(handle, can_id, can_mask, extended)
except NotImplementedError:
log.warning("Filtering is not supported")
else:
log.info("canlib is filtering on ID 0x%X, mask 0x%X", can_id, can_mask)
else:
log.info("Hardware filtering has been disabled")
try:
for handle in (self._read_handle, self._write_handle):
for extended in (0, 1):
canSetAcceptanceFilter(handle, 0, 0, extended)
except NotImplementedError:
pass
|
https://github.com/hardbyte/python-can/issues/154
|
Traceback (most recent call last):
File "pycan-test.py", line 3, in <module>
interface = KvaserBus(0)
File "/usr/local/lib/python2.7/dist-packages/can/interfaces/kvaser/canlib.py", line 361, in __init__
self.set_filters(can_filters)
File "/usr/local/lib/python2.7/dist-packages/can/interfaces/kvaser/canlib.py", line 413, in set_filters
canSetAcceptanceFilter(handle, can_id, can_mask, ext)
File "/usr/local/lib/python2.7/dist-packages/can/interfaces/kvaser/canlib.py", line 39, in _unimplemented_function
raise NotImplementedError('This function is not implemented in canlib')
NotImplementedError: This function is not implemented in canlib
|
NotImplementedError
|
def db_writer_thread(self):
num_frames = 0
last_write = time.time()
self._create_db()
while not self.stop_running_event.is_set():
messages = []
m = self.get_message(self.GET_MESSAGE_TIMEOUT)
while m is not None:
log.debug("sqlitewriter buffering message")
messages.append(
(
m.timestamp,
m.arbitration_id,
m.id_type,
m.is_remote_frame,
m.is_error_frame,
m.dlc,
buffer(m.data),
)
)
if time.time() - last_write > self.MAX_TIME_BETWEEN_WRITES:
log.debug("Max timeout between writes reached")
break
m = self.get_message(self.GET_MESSAGE_TIMEOUT)
if len(messages) > 0:
with self.conn:
log.debug("Writing %s frames to db", len(messages))
self.conn.executemany(SqliteWriter.insert_msg_template, messages)
num_frames += len(messages)
last_write = time.time()
self.conn.close()
log.info("Stopped sqlite writer after writing %s messages", num_frames)
|
def db_writer_thread(self):
num_frames = 0
last_write = time.time()
self._create_db()
while not self.stop_running_event.is_set():
messages = []
m = self.get_message(self.GET_MESSAGE_TIMEOUT)
while m is not None:
log.debug("sqlitewriter buffering message")
messages.append(
(
m.timestamp,
m.arbitration_id,
m.id_type,
m.is_remote_frame,
m.is_error_frame,
m.dlc,
buffer(m.data),
)
)
m = self.get_message(self.GET_MESSAGE_TIMEOUT)
if time.time() - last_write > self.MAX_TIME_BETWEEN_WRITES:
log.debug("Max timeout between writes reached")
break
if len(messages) > 0:
with self.conn:
log.debug("Writing %s frames to db", len(messages))
self.conn.executemany(SqliteWriter.insert_msg_template, messages)
num_frames += len(messages)
last_write = time.time()
self.conn.close()
log.info("Stopped sqlite writer after writing %s messages", num_frames)
|
https://github.com/hardbyte/python-can/issues/139
|
======================================================================
FAIL: test_sql_reader (listener_test.FileReaderTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/hardbyte/python-can/test/listener_test.py", line 179, in test_sql_reader
self.assertEqual(len(ms), 1)
AssertionError: 0 != 1
-------------------- >> begin captured logging << --------------------
can: DEBUG: can config: {'channel': None, 'interface': 'virtual'}
can.io.sql: INFO: Creating sqlite db
can.io.sql: DEBUG: Stopping sqlite writer
can.io.sql: INFO: Stopped sqlite writer after writing 0 messages
can.io.sql: DEBUG: Starting sqlreader with /tmp/tmpcqqrl_m3
can.io.sql: DEBUG: Iterating through messages from sql db
--------------------- >> end captured logging << ---------------------
|
AssertionError
|
def __init__(self, filename):
self.csv_file = open(filename, "wt")
# Write a header row
self.csv_file.write(
"timestamp, arbitration id, extended, remote, error, dlc, data\n"
)
|
def __init__(self, filename):
self.csv_file = open(filename, "wt")
# Write a header row
self.csv_file.write("timestamp, arbitrationid, flags, dlc, data")
|
https://github.com/hardbyte/python-can/issues/72
|
#!shell
(py35_python_can) e:\bitbucket_hg\python-can\bin>python can_logger.py -f ./temp.txt
Can Logger (Started on 2016-07-19 09:49:10.052735)
enter..
Shutdown Bus and Exit.
exit..
close exit ..
Exception in thread Thread-1:
Traceback (most recent call last):
File "C:\Python35\lib\threading.py", line 914, in _bootstrap_inner
self.run()
File "C:\Python35\lib\threading.py", line 862, in run
self._target(*self._args, **self._kwargs)
File "e:\bitbucket_hg\python-can\can\notifier.py", line 32, in rx_thread
msg = self.bus.recv(self.timeout)
File "e:\bitbucket_hg\python-can\can\interfaces\pcan.py", line 169, in recv
raise Exception(self._get_formatted_error(result[0]))
Exception: Der PCAN Kanal ist nicht initialisiert oder der Initialisierungsvorgang ist fehlgeschlagen
Exception ignored in: <bound method Printer.__del__ of <can.CAN.Printer object at 0x00000000015C3550>>
Traceback (most recent call last):
File "e:\bitbucket_hg\python-can\can\CAN.py", line 97, in __del__
ValueError: I/O operation on closed file.
(py35_python_can) e:\bitbucket_hg\python-can\bin>
|
ValueError
|
def on_message_received(self, msg):
row = ",".join(
[
str(msg.timestamp),
hex(msg.arbitration_id),
"1" if msg.id_type else "0",
"1" if msg.is_remote_frame else "0",
"1" if msg.is_error_frame else "0",
str(msg.dlc),
base64.b64encode(msg.data).decode("utf8"),
]
)
self.csv_file.write(row + "\n")
|
def on_message_received(self, msg):
row = ",".join([msg.timestamp, msg.arbitration_id, msg.flags, msg.dlc, msg.data])
self.csv_file.write(row + "\n")
|
https://github.com/hardbyte/python-can/issues/72
|
#!shell
(py35_python_can) e:\bitbucket_hg\python-can\bin>python can_logger.py -f ./temp.txt
Can Logger (Started on 2016-07-19 09:49:10.052735)
enter..
Shutdown Bus and Exit.
exit..
close exit ..
Exception in thread Thread-1:
Traceback (most recent call last):
File "C:\Python35\lib\threading.py", line 914, in _bootstrap_inner
self.run()
File "C:\Python35\lib\threading.py", line 862, in run
self._target(*self._args, **self._kwargs)
File "e:\bitbucket_hg\python-can\can\notifier.py", line 32, in rx_thread
msg = self.bus.recv(self.timeout)
File "e:\bitbucket_hg\python-can\can\interfaces\pcan.py", line 169, in recv
raise Exception(self._get_formatted_error(result[0]))
Exception: Der PCAN Kanal ist nicht initialisiert oder der Initialisierungsvorgang ist fehlgeschlagen
Exception ignored in: <bound method Printer.__del__ of <can.CAN.Printer object at 0x00000000015C3550>>
Traceback (most recent call last):
File "e:\bitbucket_hg\python-can\can\CAN.py", line 97, in __del__
ValueError: I/O operation on closed file.
(py35_python_can) e:\bitbucket_hg\python-can\bin>
|
ValueError
|
def __init__(self, bus, listeners, timeout=None):
"""Manages the distribution of **Messages** from a given bus to a
list of listeners.
:param bus: The :ref:`bus` to listen too.
:param listeners: An iterable of :class:`~can.Listeners`
:param timeout: An optional maximum number of seconds to wait for any message.
"""
self.listeners = listeners
self.bus = bus
self.timeout = timeout
self.running = threading.Event()
self.running.set()
self._reader = threading.Thread(target=self.rx_thread)
self._reader.daemon = True
self._reader.start()
|
def __init__(self, bus, listeners, timeout=None):
"""Manages the distribution of **Messages** from a given bus to a
list of listeners.
:param bus: The :class:`~can.Bus` to listen too.
:param listeners: An iterable of :class:`~can.Listeners`
:param timeout: An optional maximum number of seconds to wait for any message.
"""
self.listeners = listeners
self.bus = bus
self.timeout = timeout
self.running = threading.Event()
self.running.set()
self._reader = threading.Thread(target=self.rx_thread)
self._reader.daemon = True
self._reader.start()
|
https://github.com/hardbyte/python-can/issues/72
|
#!shell
(py35_python_can) e:\bitbucket_hg\python-can\bin>python can_logger.py -f ./temp.txt
Can Logger (Started on 2016-07-19 09:49:10.052735)
enter..
Shutdown Bus and Exit.
exit..
close exit ..
Exception in thread Thread-1:
Traceback (most recent call last):
File "C:\Python35\lib\threading.py", line 914, in _bootstrap_inner
self.run()
File "C:\Python35\lib\threading.py", line 862, in run
self._target(*self._args, **self._kwargs)
File "e:\bitbucket_hg\python-can\can\notifier.py", line 32, in rx_thread
msg = self.bus.recv(self.timeout)
File "e:\bitbucket_hg\python-can\can\interfaces\pcan.py", line 169, in recv
raise Exception(self._get_formatted_error(result[0]))
Exception: Der PCAN Kanal ist nicht initialisiert oder der Initialisierungsvorgang ist fehlgeschlagen
Exception ignored in: <bound method Printer.__del__ of <can.CAN.Printer object at 0x00000000015C3550>>
Traceback (most recent call last):
File "e:\bitbucket_hg\python-can\can\CAN.py", line 97, in __del__
ValueError: I/O operation on closed file.
(py35_python_can) e:\bitbucket_hg\python-can\bin>
|
ValueError
|
def stop(self):
"""Stop notifying Listeners when new :class:`~can.Message` objects arrive
and call :meth:`~can.Listener.stop` on each Listener."""
self.running.clear()
if self.timeout is not None:
self._reader.join(self.timeout + 0.1)
|
def stop(self):
self.running.clear()
if self.timeout is not None:
self._reader.join(self.timeout + 0.1)
|
https://github.com/hardbyte/python-can/issues/72
|
#!shell
(py35_python_can) e:\bitbucket_hg\python-can\bin>python can_logger.py -f ./temp.txt
Can Logger (Started on 2016-07-19 09:49:10.052735)
enter..
Shutdown Bus and Exit.
exit..
close exit ..
Exception in thread Thread-1:
Traceback (most recent call last):
File "C:\Python35\lib\threading.py", line 914, in _bootstrap_inner
self.run()
File "C:\Python35\lib\threading.py", line 862, in run
self._target(*self._args, **self._kwargs)
File "e:\bitbucket_hg\python-can\can\notifier.py", line 32, in rx_thread
msg = self.bus.recv(self.timeout)
File "e:\bitbucket_hg\python-can\can\interfaces\pcan.py", line 169, in recv
raise Exception(self._get_formatted_error(result[0]))
Exception: Der PCAN Kanal ist nicht initialisiert oder der Initialisierungsvorgang ist fehlgeschlagen
Exception ignored in: <bound method Printer.__del__ of <can.CAN.Printer object at 0x00000000015C3550>>
Traceback (most recent call last):
File "e:\bitbucket_hg\python-can\can\CAN.py", line 97, in __del__
ValueError: I/O operation on closed file.
(py35_python_can) e:\bitbucket_hg\python-can\bin>
|
ValueError
|
def rx_thread(self):
while self.running.is_set():
msg = self.bus.recv(self.timeout)
if msg is not None:
for callback in self.listeners:
callback(msg)
for listener in self.listeners:
listener.stop()
|
def rx_thread(self):
while self.running.is_set():
msg = self.bus.recv(self.timeout)
if msg is not None:
for callback in self.listeners:
callback(msg)
|
https://github.com/hardbyte/python-can/issues/72
|
#!shell
(py35_python_can) e:\bitbucket_hg\python-can\bin>python can_logger.py -f ./temp.txt
Can Logger (Started on 2016-07-19 09:49:10.052735)
enter..
Shutdown Bus and Exit.
exit..
close exit ..
Exception in thread Thread-1:
Traceback (most recent call last):
File "C:\Python35\lib\threading.py", line 914, in _bootstrap_inner
self.run()
File "C:\Python35\lib\threading.py", line 862, in run
self._target(*self._args, **self._kwargs)
File "e:\bitbucket_hg\python-can\can\notifier.py", line 32, in rx_thread
msg = self.bus.recv(self.timeout)
File "e:\bitbucket_hg\python-can\can\interfaces\pcan.py", line 169, in recv
raise Exception(self._get_formatted_error(result[0]))
Exception: Der PCAN Kanal ist nicht initialisiert oder der Initialisierungsvorgang ist fehlgeschlagen
Exception ignored in: <bound method Printer.__del__ of <can.CAN.Printer object at 0x00000000015C3550>>
Traceback (most recent call last):
File "e:\bitbucket_hg\python-can\can\CAN.py", line 97, in __del__
ValueError: I/O operation on closed file.
(py35_python_can) e:\bitbucket_hg\python-can\bin>
|
ValueError
|
async def get(self):
user = self.current_user
if user is None:
# whoami can be accessed via oauth token
user = self.get_current_user_oauth_token()
if user is None:
raise web.HTTPError(403)
if isinstance(user, orm.Service):
model = self.service_model(user)
else:
model = self.user_model(user)
self.write(json.dumps(model))
|
async def get(self):
user = self.current_user
if user is None:
# whoami can be accessed via oauth token
user = self.get_current_user_oauth_token()
if user is None:
raise web.HTTPError(403)
self.write(json.dumps(self.user_model(user)))
|
https://github.com/jupyterhub/jupyterhub/issues/3217
|
Uncaught exception GET /hub/api/user (172.17.0.2)
HTTPServerRequest(protocol='http', host='172.17.0.2:8081', method='GET', uri='/hub/api/user', version='HTTP/1.1', remote_ip='172.17.0.2')
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/tornado/web.py", line 1703, in _execute
result = await result
File "/usr/local/lib/python3.6/dist-packages/jupyterhub/apihandlers/users.py", line 38, in get
self.write(json.dumps(self.user_model(user)))
File "/usr/local/lib/python3.6/dist-packages/jupyterhub/apihandlers/base.py", line 193, in user_model
'groups': [g.name for g in user.groups],
AttributeError: 'Service' object has no attribute 'groups'
|
AttributeError
|
async def shutdown_cancel_tasks(self, sig):
"""Cancel all other tasks of the event loop and initiate cleanup"""
self.log.critical("Received signal %s, initiating shutdown...", sig.name)
tasks = [t for t in asyncio_all_tasks() if t is not asyncio_current_task()]
if tasks:
self.log.debug("Cancelling pending tasks")
[t.cancel() for t in tasks]
try:
await asyncio.wait(tasks)
except asyncio.CancelledError as e:
self.log.debug("Caught Task CancelledError. Ignoring")
except StopAsyncIteration as e:
self.log.error("Caught StopAsyncIteration Exception", exc_info=True)
tasks = [t for t in asyncio_all_tasks()]
for t in tasks:
self.log.debug("Task status: %s", t)
await self.cleanup()
asyncio.get_event_loop().stop()
|
async def shutdown_cancel_tasks(self, sig):
"""Cancel all other tasks of the event loop and initiate cleanup"""
self.log.critical("Received signal %s, initiating shutdown...", sig.name)
tasks = [
t for t in asyncio.Task.all_tasks() if t is not asyncio.Task.current_task()
]
if tasks:
self.log.debug("Cancelling pending tasks")
[t.cancel() for t in tasks]
try:
await asyncio.wait(tasks)
except asyncio.CancelledError as e:
self.log.debug("Caught Task CancelledError. Ignoring")
except StopAsyncIteration as e:
self.log.error("Caught StopAsyncIteration Exception", exc_info=True)
tasks = [t for t in asyncio.Task.all_tasks()]
for t in tasks:
self.log.debug("Task status: %s", t)
await self.cleanup()
asyncio.get_event_loop().stop()
|
https://github.com/jupyterhub/jupyterhub/issues/3297
|
^C[C 2020-12-02 16:16:45.801 JupyterHub app:2810] Received signal SIGINT, initiating shutdown...
ERROR:asyncio:Task exception was never retrieved
future: <Task finished name='Task-8' coro=<JupyterHub.shutdown_cancel_tasks() done, defined at /opt/miniconda3/envs/jupyterhub/lib/python3.9/site-packages/jupyterhub/app.py:2808> exception=AttributeError("type object '_asyncio.Task' has no attribute 'all_tasks'")>
Traceback (most recent call last):
File "/opt/miniconda3/envs/jupyterhub/lib/python3.9/site-packages/jupyterhub/app.py", line 2812, in shutdown_cancel_tasks
t for t in asyncio.Task.all_tasks() if t is not asyncio.Task.current_task()
AttributeError: type object '_asyncio.Task' has no attribute 'all_tasks'
|
AttributeError
|
def print_stacks(file=sys.stderr):
"""Print current status of the process
For debugging purposes.
Used as part of SIGINFO handler.
- Shows active thread count
- Shows current stack for all threads
Parameters:
file: file to write output to (default: stderr)
"""
# local imports because these will not be used often,
# no need to add them to startup
import asyncio
import traceback
from .log import coroutine_frames
print("Active threads: %i" % threading.active_count(), file=file)
for thread in threading.enumerate():
print("Thread %s:" % thread.name, end="", file=file)
frame = sys._current_frames()[thread.ident]
stack = traceback.extract_stack(frame)
if thread is threading.current_thread():
# truncate last two frames of the current thread
# which are this function and its caller
stack = stack[:-2]
stack = coroutine_frames(stack)
if stack:
last_frame = stack[-1]
if (
last_frame[0].endswith("threading.py")
and last_frame[-1] == "waiter.acquire()"
) or (
last_frame[0].endswith("thread.py")
and last_frame[-1].endswith("work_queue.get(block=True)")
):
# thread is waiting on a condition
# call it idle rather than showing the uninteresting stack
# most threadpools will be in this state
print(" idle", file=file)
continue
print("".join(["\n"] + traceback.format_list(stack)), file=file)
# also show asyncio tasks, if any
# this will increase over time as we transition from tornado
# coroutines to native `async def`
tasks = asyncio_all_tasks()
if tasks:
print("AsyncIO tasks: %i" % len(tasks))
for task in tasks:
task.print_stack(file=file)
|
def print_stacks(file=sys.stderr):
"""Print current status of the process
For debugging purposes.
Used as part of SIGINFO handler.
- Shows active thread count
- Shows current stack for all threads
Parameters:
file: file to write output to (default: stderr)
"""
# local imports because these will not be used often,
# no need to add them to startup
import asyncio
import traceback
from .log import coroutine_frames
print("Active threads: %i" % threading.active_count(), file=file)
for thread in threading.enumerate():
print("Thread %s:" % thread.name, end="", file=file)
frame = sys._current_frames()[thread.ident]
stack = traceback.extract_stack(frame)
if thread is threading.current_thread():
# truncate last two frames of the current thread
# which are this function and its caller
stack = stack[:-2]
stack = coroutine_frames(stack)
if stack:
last_frame = stack[-1]
if (
last_frame[0].endswith("threading.py")
and last_frame[-1] == "waiter.acquire()"
) or (
last_frame[0].endswith("thread.py")
and last_frame[-1].endswith("work_queue.get(block=True)")
):
# thread is waiting on a condition
# call it idle rather than showing the uninteresting stack
# most threadpools will be in this state
print(" idle", file=file)
continue
print("".join(["\n"] + traceback.format_list(stack)), file=file)
# also show asyncio tasks, if any
# this will increase over time as we transition from tornado
# coroutines to native `async def`
tasks = asyncio.Task.all_tasks()
if tasks:
print("AsyncIO tasks: %i" % len(tasks))
for task in tasks:
task.print_stack(file=file)
|
https://github.com/jupyterhub/jupyterhub/issues/3297
|
^C[C 2020-12-02 16:16:45.801 JupyterHub app:2810] Received signal SIGINT, initiating shutdown...
ERROR:asyncio:Task exception was never retrieved
future: <Task finished name='Task-8' coro=<JupyterHub.shutdown_cancel_tasks() done, defined at /opt/miniconda3/envs/jupyterhub/lib/python3.9/site-packages/jupyterhub/app.py:2808> exception=AttributeError("type object '_asyncio.Task' has no attribute 'all_tasks'")>
Traceback (most recent call last):
File "/opt/miniconda3/envs/jupyterhub/lib/python3.9/site-packages/jupyterhub/app.py", line 2812, in shutdown_cancel_tasks
t for t in asyncio.Task.all_tasks() if t is not asyncio.Task.current_task()
AttributeError: type object '_asyncio.Task' has no attribute 'all_tasks'
|
AttributeError
|
async def api_request(self, path, method="GET", body=None, client=None):
"""Make an authenticated API request of the proxy."""
client = client or AsyncHTTPClient()
url = url_path_join(self.api_url, "api/routes", path)
if isinstance(body, dict):
body = json.dumps(body)
self.log.debug("Proxy: Fetching %s %s", method, url)
req = HTTPRequest(
url,
method=method,
headers={"Authorization": "token {}".format(self.auth_token)},
body=body,
connect_timeout=3, # default: 20s
request_timeout=10, # default: 20s
)
async def _wait_for_api_request():
try:
async with self.semaphore:
return await client.fetch(req)
except HTTPError as e:
# Retry on potentially transient errors in CHP, typically
# numbered 500 and up. Note that CHP isn't able to emit 429
# errors.
if e.code >= 500:
self.log.warning(
"api_request to the proxy failed with status code {}, retrying...".format(
e.code
)
)
return False # a falsy return value make exponential_backoff retry
else:
self.log.error("api_request to proxy failed: {0}".format(e))
# An unhandled error here will help the hub invoke cleanup logic
raise
result = await exponential_backoff(
_wait_for_api_request,
'Repeated api_request to proxy path "{}" failed.'.format(path),
timeout=30,
)
return result
|
async def api_request(self, path, method="GET", body=None, client=None):
"""Make an authenticated API request of the proxy."""
client = client or AsyncHTTPClient()
url = url_path_join(self.api_url, "api/routes", path)
if isinstance(body, dict):
body = json.dumps(body)
self.log.debug("Proxy: Fetching %s %s", method, url)
req = HTTPRequest(
url,
method=method,
headers={"Authorization": "token {}".format(self.auth_token)},
body=body,
)
async with self.semaphore:
result = await client.fetch(req)
return result
|
https://github.com/jupyterhub/jupyterhub/issues/3222
|
[I 2020-10-20 16:48:25.540 JupyterHub base:893] User testuser-aa6aa5ea-64bb-4719-93c7-04557f25c3d5 took 2.179 seconds to start
[I 2020-10-20 16:48:25.540 JupyterHub proxy:257] Adding user testuser-aa6aa5ea-64bb-4719-93c7-04557f25c3d5 to proxy /user/testuser-aa6aa5ea-64bb-4719-93c7-04557f25c3d5/ => http://192.168.79.75:8888
[D 2020-10-20 16:48:25.540 JupyterHub proxy:765] Proxy: Fetching POST http://proxy-api:8001/api/routes/user/testuser-aa6aa5ea-64bb-4719-93c7-04557f25c3d5
[I 2020-10-20 16:48:25.541 JupyterHub log:181] 200 GET /hub/api (@10.20.0.120) 1.70ms
[E 2020-10-20 16:48:25.542 JupyterHub base:915] Failed to add testuser-aa6aa5ea-64bb-4719-93c7-04557f25c3d5 to proxy!
Traceback (most recent call last):
File "/usr/local/lib/python3.8/dist-packages/jupyterhub/handlers/base.py", line 908, in finish_user_spawn
await self.proxy.add_user(user, server_name)
File "/usr/local/lib/python3.8/dist-packages/jupyterhub/proxy.py", line 270, in add_user
await self.add_route(
File "/usr/local/lib/python3.8/dist-packages/jupyterhub/proxy.py", line 781, in add_route
await self.api_request(path, method='POST', body=body)
File "/usr/local/lib/python3.8/dist-packages/jupyterhub/proxy.py", line 773, in api_request
result = await client.fetch(req)
tornado.curl_httpclient.CurlError: HTTP 599: Send failure: Broken pipe
|
tornado.curl_httpclient.CurlError
|
async def spawn_single_user(self, user, server_name="", options=None):
# in case of error, include 'try again from /hub/home' message
if self.authenticator.refresh_pre_spawn:
auth_user = await self.refresh_auth(user, force=True)
if auth_user is None:
raise web.HTTPError(403, "auth has expired for %s, login again", user.name)
spawn_start_time = time.perf_counter()
self.extra_error_html = self.spawn_home_error
user_server_name = user.name
if server_name:
user_server_name = "%s:%s" % (user.name, server_name)
if server_name in user.spawners and user.spawners[server_name].pending:
pending = user.spawners[server_name].pending
SERVER_SPAWN_DURATION_SECONDS.labels(
status=ServerSpawnStatus.already_pending
).observe(time.perf_counter() - spawn_start_time)
raise RuntimeError("%s pending %s" % (user_server_name, pending))
# count active servers and pending spawns
# we could do careful bookkeeping to avoid
# but for 10k users this takes ~5ms
# and saves us from bookkeeping errors
active_counts = self.users.count_active_users()
spawn_pending_count = (
active_counts["spawn_pending"] + active_counts["proxy_pending"]
)
active_count = active_counts["active"]
RUNNING_SERVERS.set(active_count)
concurrent_spawn_limit = self.concurrent_spawn_limit
active_server_limit = self.active_server_limit
if concurrent_spawn_limit and spawn_pending_count >= concurrent_spawn_limit:
SERVER_SPAWN_DURATION_SECONDS.labels(
status=ServerSpawnStatus.throttled
).observe(time.perf_counter() - spawn_start_time)
# Suggest number of seconds client should wait before retrying
# This helps prevent thundering herd problems, where users simply
# immediately retry when we are overloaded.
retry_range = self.settings["spawn_throttle_retry_range"]
retry_time = int(random.uniform(*retry_range))
# round suggestion to nicer human value (nearest 10 seconds or minute)
if retry_time <= 90:
# round human seconds up to nearest 10
human_retry_time = "%i0 seconds" % math.ceil(retry_time / 10.0)
else:
# round number of minutes
human_retry_time = "%i minutes" % math.round(retry_time / 60.0)
self.log.warning(
"%s pending spawns, throttling. Suggested retry in %s seconds.",
spawn_pending_count,
retry_time,
)
err = web.HTTPError(
429,
"Too many users trying to log in right now. Try again in {}.".format(
human_retry_time
),
)
# can't call set_header directly here because it gets ignored
# when errors are raised
# we handle err.headers ourselves in Handler.write_error
err.headers = {"Retry-After": retry_time}
raise err
if active_server_limit and active_count >= active_server_limit:
self.log.info("%s servers active, no space available", active_count)
SERVER_SPAWN_DURATION_SECONDS.labels(
status=ServerSpawnStatus.too_many_users
).observe(time.perf_counter() - spawn_start_time)
raise web.HTTPError(
429, "Active user limit exceeded. Try again in a few minutes."
)
tic = IOLoop.current().time()
self.log.debug("Initiating spawn for %s", user_server_name)
spawn_future = user.spawn(server_name, options, handler=self)
self.log.debug(
"%i%s concurrent spawns",
spawn_pending_count,
"/%i" % concurrent_spawn_limit if concurrent_spawn_limit else "",
)
self.log.debug(
"%i%s active servers",
active_count,
"/%i" % active_server_limit if active_server_limit else "",
)
spawner = user.spawners[server_name]
# set spawn_pending now, so there's no gap where _spawn_pending is False
# while we are waiting for _proxy_pending to be set
spawner._spawn_pending = True
async def finish_user_spawn():
"""Finish the user spawn by registering listeners and notifying the proxy.
If the spawner is slow to start, this is passed as an async callback,
otherwise it is called immediately.
"""
# wait for spawn Future
await spawn_future
toc = IOLoop.current().time()
self.log.info("User %s took %.3f seconds to start", user_server_name, toc - tic)
self.statsd.timing("spawner.success", (toc - tic) * 1000)
SERVER_SPAWN_DURATION_SECONDS.labels(status=ServerSpawnStatus.success).observe(
time.perf_counter() - spawn_start_time
)
self.eventlog.record_event(
"hub.jupyter.org/server-action",
1,
{"action": "start", "username": user.name, "servername": server_name},
)
proxy_add_start_time = time.perf_counter()
spawner._proxy_pending = True
try:
await self.proxy.add_user(user, server_name)
PROXY_ADD_DURATION_SECONDS.labels(status="success").observe(
time.perf_counter() - proxy_add_start_time
)
RUNNING_SERVERS.inc()
except Exception:
self.log.exception("Failed to add %s to proxy!", user_server_name)
self.log.error("Stopping %s to avoid inconsistent state", user_server_name)
await user.stop(server_name)
PROXY_ADD_DURATION_SECONDS.labels(status="failure").observe(
time.perf_counter() - proxy_add_start_time
)
else:
spawner.add_poll_callback(self.user_stopped, user, server_name)
finally:
spawner._proxy_pending = False
# hook up spawner._spawn_future so that other requests can await
# this result
finish_spawn_future = spawner._spawn_future = maybe_future(finish_user_spawn())
def _clear_spawn_future(f):
# clear spawner._spawn_future when it's done
# keep an exception around, though, to prevent repeated implicit spawns
# if spawn is failing
if f.cancelled() or f.exception() is None:
spawner._spawn_future = None
# Now we're all done. clear _spawn_pending flag
spawner._spawn_pending = False
finish_spawn_future.add_done_callback(_clear_spawn_future)
# when spawn finishes (success or failure)
# update failure count and abort if consecutive failure limit
# is reached
def _track_failure_count(f):
if f.cancelled() or f.exception() is None:
# spawn succeeded, reset failure count
self.settings["failure_count"] = 0
return
# spawn failed, increment count and abort if limit reached
self.settings.setdefault("failure_count", 0)
self.settings["failure_count"] += 1
failure_count = self.settings["failure_count"]
failure_limit = spawner.consecutive_failure_limit
if failure_limit and 1 < failure_count < failure_limit:
self.log.warning(
"%i consecutive spawns failed. "
"Hub will exit if failure count reaches %i before succeeding",
failure_count,
failure_limit,
)
if failure_limit and failure_count >= failure_limit:
self.log.critical(
"Aborting due to %i consecutive spawn failures", failure_count
)
# abort in 2 seconds to allow pending handlers to resolve
# mostly propagating errors for the current failures
def abort():
raise SystemExit(1)
IOLoop.current().call_later(2, abort)
finish_spawn_future.add_done_callback(_track_failure_count)
try:
await gen.with_timeout(
timedelta(seconds=self.slow_spawn_timeout), finish_spawn_future
)
except gen.TimeoutError:
# waiting_for_response indicates server process has started,
# but is yet to become responsive.
if spawner._spawn_pending and not spawner._waiting_for_response:
# still in Spawner.start, which is taking a long time
# we shouldn't poll while spawn is incomplete.
self.log.warning(
"User %s is slow to start (timeout=%s)",
user_server_name,
self.slow_spawn_timeout,
)
return
# start has finished, but the server hasn't come up
# check if the server died while we were waiting
poll_start_time = time.perf_counter()
status = await spawner.poll()
SERVER_POLL_DURATION_SECONDS.labels(
status=ServerPollStatus.from_status(status)
).observe(time.perf_counter() - poll_start_time)
if status is not None:
toc = IOLoop.current().time()
self.statsd.timing("spawner.failure", (toc - tic) * 1000)
SERVER_SPAWN_DURATION_SECONDS.labels(
status=ServerSpawnStatus.failure
).observe(time.perf_counter() - spawn_start_time)
raise web.HTTPError(
500,
"Spawner failed to start [status=%s]. The logs for %s may contain details."
% (status, spawner._log_name),
)
if spawner._waiting_for_response:
# hit timeout waiting for response, but server's running.
# Hope that it'll show up soon enough,
# though it's possible that it started at the wrong URL
self.log.warning(
"User %s is slow to become responsive (timeout=%s)",
user_server_name,
self.slow_spawn_timeout,
)
self.log.debug(
"Expecting server for %s at: %s",
user_server_name,
spawner.server.url,
)
if spawner._proxy_pending:
# User.spawn finished, but it hasn't been added to the proxy
# Could be due to load or a slow proxy
self.log.warning(
"User %s is slow to be added to the proxy (timeout=%s)",
user_server_name,
self.slow_spawn_timeout,
)
|
async def spawn_single_user(self, user, server_name="", options=None):
# in case of error, include 'try again from /hub/home' message
if self.authenticator.refresh_pre_spawn:
auth_user = await self.refresh_auth(user, force=True)
if auth_user is None:
raise web.HTTPError(403, "auth has expired for %s, login again", user.name)
spawn_start_time = time.perf_counter()
self.extra_error_html = self.spawn_home_error
user_server_name = user.name
if server_name:
user_server_name = "%s:%s" % (user.name, server_name)
if server_name in user.spawners and user.spawners[server_name].pending:
pending = user.spawners[server_name].pending
SERVER_SPAWN_DURATION_SECONDS.labels(
status=ServerSpawnStatus.already_pending
).observe(time.perf_counter() - spawn_start_time)
raise RuntimeError("%s pending %s" % (user_server_name, pending))
# count active servers and pending spawns
# we could do careful bookkeeping to avoid
# but for 10k users this takes ~5ms
# and saves us from bookkeeping errors
active_counts = self.users.count_active_users()
spawn_pending_count = (
active_counts["spawn_pending"] + active_counts["proxy_pending"]
)
active_count = active_counts["active"]
RUNNING_SERVERS.set(active_count)
concurrent_spawn_limit = self.concurrent_spawn_limit
active_server_limit = self.active_server_limit
if concurrent_spawn_limit and spawn_pending_count >= concurrent_spawn_limit:
SERVER_SPAWN_DURATION_SECONDS.labels(
status=ServerSpawnStatus.throttled
).observe(time.perf_counter() - spawn_start_time)
# Suggest number of seconds client should wait before retrying
# This helps prevent thundering herd problems, where users simply
# immediately retry when we are overloaded.
retry_range = self.settings["spawn_throttle_retry_range"]
retry_time = int(random.uniform(*retry_range))
# round suggestion to nicer human value (nearest 10 seconds or minute)
if retry_time <= 90:
# round human seconds up to nearest 10
human_retry_time = "%i0 seconds" % math.ceil(retry_time / 10.0)
else:
# round number of minutes
human_retry_time = "%i minutes" % math.round(retry_time / 60.0)
self.log.warning(
"%s pending spawns, throttling. Suggested retry in %s seconds.",
spawn_pending_count,
retry_time,
)
err = web.HTTPError(
429,
"Too many users trying to log in right now. Try again in {}.".format(
human_retry_time
),
)
# can't call set_header directly here because it gets ignored
# when errors are raised
# we handle err.headers ourselves in Handler.write_error
err.headers = {"Retry-After": retry_time}
raise err
if active_server_limit and active_count >= active_server_limit:
self.log.info("%s servers active, no space available", active_count)
SERVER_SPAWN_DURATION_SECONDS.labels(
status=ServerSpawnStatus.too_many_users
).observe(time.perf_counter() - spawn_start_time)
raise web.HTTPError(
429, "Active user limit exceeded. Try again in a few minutes."
)
tic = IOLoop.current().time()
self.log.debug("Initiating spawn for %s", user_server_name)
spawn_future = user.spawn(server_name, options, handler=self)
self.log.debug(
"%i%s concurrent spawns",
spawn_pending_count,
"/%i" % concurrent_spawn_limit if concurrent_spawn_limit else "",
)
self.log.debug(
"%i%s active servers",
active_count,
"/%i" % active_server_limit if active_server_limit else "",
)
spawner = user.spawners[server_name]
# set spawn_pending now, so there's no gap where _spawn_pending is False
# while we are waiting for _proxy_pending to be set
spawner._spawn_pending = True
async def finish_user_spawn():
"""Finish the user spawn by registering listeners and notifying the proxy.
If the spawner is slow to start, this is passed as an async callback,
otherwise it is called immediately.
"""
# wait for spawn Future
await spawn_future
toc = IOLoop.current().time()
self.log.info("User %s took %.3f seconds to start", user_server_name, toc - tic)
self.statsd.timing("spawner.success", (toc - tic) * 1000)
SERVER_SPAWN_DURATION_SECONDS.labels(status=ServerSpawnStatus.success).observe(
time.perf_counter() - spawn_start_time
)
self.eventlog.record_event(
"hub.jupyter.org/server-action",
1,
{"action": "start", "username": user.name, "servername": server_name},
)
proxy_add_start_time = time.perf_counter()
spawner._proxy_pending = True
try:
await self.proxy.add_user(user, server_name)
PROXY_ADD_DURATION_SECONDS.labels(status="success").observe(
time.perf_counter() - proxy_add_start_time
)
RUNNING_SERVERS.inc()
except Exception:
self.log.exception("Failed to add %s to proxy!", user_server_name)
self.log.error("Stopping %s to avoid inconsistent state", user_server_name)
await user.stop()
PROXY_ADD_DURATION_SECONDS.labels(status="failure").observe(
time.perf_counter() - proxy_add_start_time
)
else:
spawner.add_poll_callback(self.user_stopped, user, server_name)
finally:
spawner._proxy_pending = False
# hook up spawner._spawn_future so that other requests can await
# this result
finish_spawn_future = spawner._spawn_future = maybe_future(finish_user_spawn())
def _clear_spawn_future(f):
# clear spawner._spawn_future when it's done
# keep an exception around, though, to prevent repeated implicit spawns
# if spawn is failing
if f.cancelled() or f.exception() is None:
spawner._spawn_future = None
# Now we're all done. clear _spawn_pending flag
spawner._spawn_pending = False
finish_spawn_future.add_done_callback(_clear_spawn_future)
# when spawn finishes (success or failure)
# update failure count and abort if consecutive failure limit
# is reached
def _track_failure_count(f):
if f.cancelled() or f.exception() is None:
# spawn succeeded, reset failure count
self.settings["failure_count"] = 0
return
# spawn failed, increment count and abort if limit reached
self.settings.setdefault("failure_count", 0)
self.settings["failure_count"] += 1
failure_count = self.settings["failure_count"]
failure_limit = spawner.consecutive_failure_limit
if failure_limit and 1 < failure_count < failure_limit:
self.log.warning(
"%i consecutive spawns failed. "
"Hub will exit if failure count reaches %i before succeeding",
failure_count,
failure_limit,
)
if failure_limit and failure_count >= failure_limit:
self.log.critical(
"Aborting due to %i consecutive spawn failures", failure_count
)
# abort in 2 seconds to allow pending handlers to resolve
# mostly propagating errors for the current failures
def abort():
raise SystemExit(1)
IOLoop.current().call_later(2, abort)
finish_spawn_future.add_done_callback(_track_failure_count)
try:
await gen.with_timeout(
timedelta(seconds=self.slow_spawn_timeout), finish_spawn_future
)
except gen.TimeoutError:
# waiting_for_response indicates server process has started,
# but is yet to become responsive.
if spawner._spawn_pending and not spawner._waiting_for_response:
# still in Spawner.start, which is taking a long time
# we shouldn't poll while spawn is incomplete.
self.log.warning(
"User %s is slow to start (timeout=%s)",
user_server_name,
self.slow_spawn_timeout,
)
return
# start has finished, but the server hasn't come up
# check if the server died while we were waiting
poll_start_time = time.perf_counter()
status = await spawner.poll()
SERVER_POLL_DURATION_SECONDS.labels(
status=ServerPollStatus.from_status(status)
).observe(time.perf_counter() - poll_start_time)
if status is not None:
toc = IOLoop.current().time()
self.statsd.timing("spawner.failure", (toc - tic) * 1000)
SERVER_SPAWN_DURATION_SECONDS.labels(
status=ServerSpawnStatus.failure
).observe(time.perf_counter() - spawn_start_time)
raise web.HTTPError(
500,
"Spawner failed to start [status=%s]. The logs for %s may contain details."
% (status, spawner._log_name),
)
if spawner._waiting_for_response:
# hit timeout waiting for response, but server's running.
# Hope that it'll show up soon enough,
# though it's possible that it started at the wrong URL
self.log.warning(
"User %s is slow to become responsive (timeout=%s)",
user_server_name,
self.slow_spawn_timeout,
)
self.log.debug(
"Expecting server for %s at: %s",
user_server_name,
spawner.server.url,
)
if spawner._proxy_pending:
# User.spawn finished, but it hasn't been added to the proxy
# Could be due to load or a slow proxy
self.log.warning(
"User %s is slow to be added to the proxy (timeout=%s)",
user_server_name,
self.slow_spawn_timeout,
)
|
https://github.com/jupyterhub/jupyterhub/issues/3060
|
[E 2020-05-28 22:02:11.206 JupyterHub base:876] Failed to add michaeldzamba:doublegpu7 to proxy!
Traceback (most recent call last):
File "/fn/lib/python3.7/site-packages/jupyterhub/handlers/base.py", line 869, in finish_user_spawn
await self.proxy.add_user(user, server_name)
File "/fn/lib/python3.7/site-packages/jupyterhub/proxy.py", line 274, in add_user
{'user': user.name, 'server_name': server_name},
File "/fn/lib/python3.7/site-packages/jupyterhub_traefik_proxy/kv_proxy.py", line 289, in add_route
await self._wait_for_route(routespec, provider=self.kv_name)
File "/fn/lib/python3.7/site-packages/jupyterhub_traefik_proxy/proxy.py", line 147, in _wait_for_route
timeout=self.check_route_timeout,
File "/fn/lib/python3.7/site-packages/jupyterhub/utils.py", line 177, in exponential_backoff
raise TimeoutError(fail_message)
TimeoutError: Traefik route for /user/michaeldzamba/doublegpu7/ configuration not available
[E 2020-05-28 22:02:11.207 JupyterHub base:878] Stopping michaeldzamba:doublegpu7 to avoid inconsistent state
[I 2020-05-28 22:02:11.208 JupyterHub spawner:1866] Deleting pod jupyter-michaeldzamba
|
TimeoutError
|
async def finish_user_spawn():
"""Finish the user spawn by registering listeners and notifying the proxy.
If the spawner is slow to start, this is passed as an async callback,
otherwise it is called immediately.
"""
# wait for spawn Future
await spawn_future
toc = IOLoop.current().time()
self.log.info("User %s took %.3f seconds to start", user_server_name, toc - tic)
self.statsd.timing("spawner.success", (toc - tic) * 1000)
SERVER_SPAWN_DURATION_SECONDS.labels(status=ServerSpawnStatus.success).observe(
time.perf_counter() - spawn_start_time
)
self.eventlog.record_event(
"hub.jupyter.org/server-action",
1,
{"action": "start", "username": user.name, "servername": server_name},
)
proxy_add_start_time = time.perf_counter()
spawner._proxy_pending = True
try:
await self.proxy.add_user(user, server_name)
PROXY_ADD_DURATION_SECONDS.labels(status="success").observe(
time.perf_counter() - proxy_add_start_time
)
RUNNING_SERVERS.inc()
except Exception:
self.log.exception("Failed to add %s to proxy!", user_server_name)
self.log.error("Stopping %s to avoid inconsistent state", user_server_name)
await user.stop(server_name)
PROXY_ADD_DURATION_SECONDS.labels(status="failure").observe(
time.perf_counter() - proxy_add_start_time
)
else:
spawner.add_poll_callback(self.user_stopped, user, server_name)
finally:
spawner._proxy_pending = False
|
async def finish_user_spawn():
"""Finish the user spawn by registering listeners and notifying the proxy.
If the spawner is slow to start, this is passed as an async callback,
otherwise it is called immediately.
"""
# wait for spawn Future
await spawn_future
toc = IOLoop.current().time()
self.log.info("User %s took %.3f seconds to start", user_server_name, toc - tic)
self.statsd.timing("spawner.success", (toc - tic) * 1000)
SERVER_SPAWN_DURATION_SECONDS.labels(status=ServerSpawnStatus.success).observe(
time.perf_counter() - spawn_start_time
)
self.eventlog.record_event(
"hub.jupyter.org/server-action",
1,
{"action": "start", "username": user.name, "servername": server_name},
)
proxy_add_start_time = time.perf_counter()
spawner._proxy_pending = True
try:
await self.proxy.add_user(user, server_name)
PROXY_ADD_DURATION_SECONDS.labels(status="success").observe(
time.perf_counter() - proxy_add_start_time
)
RUNNING_SERVERS.inc()
except Exception:
self.log.exception("Failed to add %s to proxy!", user_server_name)
self.log.error("Stopping %s to avoid inconsistent state", user_server_name)
await user.stop()
PROXY_ADD_DURATION_SECONDS.labels(status="failure").observe(
time.perf_counter() - proxy_add_start_time
)
else:
spawner.add_poll_callback(self.user_stopped, user, server_name)
finally:
spawner._proxy_pending = False
|
https://github.com/jupyterhub/jupyterhub/issues/3060
|
[E 2020-05-28 22:02:11.206 JupyterHub base:876] Failed to add michaeldzamba:doublegpu7 to proxy!
Traceback (most recent call last):
File "/fn/lib/python3.7/site-packages/jupyterhub/handlers/base.py", line 869, in finish_user_spawn
await self.proxy.add_user(user, server_name)
File "/fn/lib/python3.7/site-packages/jupyterhub/proxy.py", line 274, in add_user
{'user': user.name, 'server_name': server_name},
File "/fn/lib/python3.7/site-packages/jupyterhub_traefik_proxy/kv_proxy.py", line 289, in add_route
await self._wait_for_route(routespec, provider=self.kv_name)
File "/fn/lib/python3.7/site-packages/jupyterhub_traefik_proxy/proxy.py", line 147, in _wait_for_route
timeout=self.check_route_timeout,
File "/fn/lib/python3.7/site-packages/jupyterhub/utils.py", line 177, in exponential_backoff
raise TimeoutError(fail_message)
TimeoutError: Traefik route for /user/michaeldzamba/doublegpu7/ configuration not available
[E 2020-05-28 22:02:11.207 JupyterHub base:878] Stopping michaeldzamba:doublegpu7 to avoid inconsistent state
[I 2020-05-28 22:02:11.208 JupyterHub spawner:1866] Deleting pod jupyter-michaeldzamba
|
TimeoutError
|
def write_error(self, status_code, **kwargs):
"""Write JSON errors instead of HTML"""
exc_info = kwargs.get("exc_info")
message = ""
exception = None
status_message = responses.get(status_code, "Unknown Error")
if exc_info:
exception = exc_info[1]
# get the custom message, if defined
try:
message = exception.log_message % exception.args
except Exception:
pass
# construct the custom reason, if defined
reason = getattr(exception, "reason", "")
if reason:
status_message = reason
if exception and isinstance(exception, SQLAlchemyError):
self.log.warning("Rolling back session due to database error %s", exception)
self.db.rollback()
self.set_header("Content-Type", "application/json")
# allow setting headers from exceptions
# since exception handler clears headers
headers = getattr(exception, "headers", None)
if headers:
for key, value in headers.items():
self.set_header(key, value)
self.write(
json.dumps(
{
"status": status_code,
"message": message or status_message,
}
)
)
|
def write_error(self, status_code, **kwargs):
"""Write JSON errors instead of HTML"""
exc_info = kwargs.get("exc_info")
message = ""
exception = None
status_message = responses.get(status_code, "Unknown Error")
if exc_info:
exception = exc_info[1]
# get the custom message, if defined
try:
message = exception.log_message % exception.args
except Exception:
pass
# construct the custom reason, if defined
reason = getattr(exception, "reason", "")
if reason:
status_message = reason
self.set_header("Content-Type", "application/json")
# allow setting headers from exceptions
# since exception handler clears headers
headers = getattr(exception, "headers", None)
if headers:
for key, value in headers.items():
self.set_header(key, value)
self.write(
json.dumps(
{
"status": status_code,
"message": message or status_message,
}
)
)
|
https://github.com/jupyterhub/jupyterhub/issues/1626
|
[E 2018-01-19 04:14:19.920 JupyterHub web:1518] Exception in exception handler
Traceback (most recent call last):
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 1516, in _execute
self._handle_request_exception(e)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 1570, in _handle_request_exception
self.send_error(500, exc_info=sys.exc_info())
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 1040, in send_error
self.finish()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/handlers/base.py", line 105, in finish
super().finish(*args, **kwargs)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 993, in finish
self._log()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 1540, in _log
self.application.log_request(self)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 2053, in log_request
self.settings["log_function"](handler)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/log.py", line 90, in log_request
user = handler.get_current_user()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/handlers/base.py", line 229, in get_current_user
return self.get_current_user_cookie()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/handlers/base.py", line 222, in get_current_user_cookie
return self._user_for_cookie(self.hub.cookie_name)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/handlers/base.py", line 206, in _user_for_cookie
u = self.db.query(orm.User).filter(orm.User.cookie_id==cookie_id).first()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 2755, in first
ret = list(self[0:1])
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 2547, in __getitem__
return list(res)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 2855, in __iter__
return self._execute_and_instances(context)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 2878, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 945, in execute
return meth(self, multiparams, params)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/sql/elements.py", line 263, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1053, in _execute_clauseelement
compiled_sql, distilled_params
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1121, in _execute_context
None, None)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1402, in _handle_dbapi_exception
exc_info
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 203, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 186, in reraise
raise value.with_traceback(tb)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1114, in _execute_context
conn = self._revalidate_connection()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 424, in _revalidate_connection
"Can't reconnect until invalid "
sqlalchemy.exc.StatementError: (sqlalchemy.exc.InvalidRequestError) Can't reconnect until invalid transaction is rolled back [SQL: 'SELECT users.id AS users_id, users.name AS users_name, users.`admin` AS users_admin, users.last_activity AS users_last_activity, users.cookie_id AS users_cookie_id, users.state AS users_state, users.encrypted_auth_state AS users_encrypted_auth_state \nFROM users \nWHERE users.cookie_id = %(cookie_id_1)s \n LIMIT %(param_1)s'] [parameters: [{}]]
|
sqlalchemy.exc.StatementError
|
async def update_last_activity(self):
"""Update User.last_activity timestamps from the proxy"""
routes = await self.proxy.get_all_routes()
users_count = 0
active_users_count = 0
now = datetime.utcnow()
for prefix, route in routes.items():
route_data = route["data"]
if "user" not in route_data:
# not a user route, ignore it
continue
if "server_name" not in route_data:
continue
users_count += 1
if "last_activity" not in route_data:
# no last activity data (possibly proxy other than CHP)
continue
user = orm.User.find(self.db, route_data["user"])
if user is None:
self.log.warning("Found no user for route: %s", route)
continue
spawner = user.orm_spawners.get(route_data["server_name"])
if spawner is None:
self.log.warning("Found no spawner for route: %s", route)
continue
dt = parse_date(route_data["last_activity"])
if dt.tzinfo:
# strip timezone info to naïve UTC datetime
dt = dt.astimezone(timezone.utc).replace(tzinfo=None)
if user.last_activity:
user.last_activity = max(user.last_activity, dt)
else:
user.last_activity = dt
if spawner.last_activity:
spawner.last_activity = max(spawner.last_activity, dt)
else:
spawner.last_activity = dt
# FIXME: Make this configurable duration. 30 minutes for now!
if (now - user.last_activity).total_seconds() < 30 * 60:
active_users_count += 1
self.statsd.gauge("users.running", users_count)
self.statsd.gauge("users.active", active_users_count)
try:
self.db.commit()
except SQLAlchemyError:
self.log.exception("Rolling back session due to database error")
self.db.rollback()
return
await self.proxy.check_routes(self.users, self._service_map, routes)
|
async def update_last_activity(self):
"""Update User.last_activity timestamps from the proxy"""
routes = await self.proxy.get_all_routes()
users_count = 0
active_users_count = 0
now = datetime.utcnow()
for prefix, route in routes.items():
route_data = route["data"]
if "user" not in route_data:
# not a user route, ignore it
continue
if "server_name" not in route_data:
continue
users_count += 1
if "last_activity" not in route_data:
# no last activity data (possibly proxy other than CHP)
continue
user = orm.User.find(self.db, route_data["user"])
if user is None:
self.log.warning("Found no user for route: %s", route)
continue
spawner = user.orm_spawners.get(route_data["server_name"])
if spawner is None:
self.log.warning("Found no spawner for route: %s", route)
continue
dt = parse_date(route_data["last_activity"])
if dt.tzinfo:
# strip timezone info to naïve UTC datetime
dt = dt.astimezone(timezone.utc).replace(tzinfo=None)
if user.last_activity:
user.last_activity = max(user.last_activity, dt)
else:
user.last_activity = dt
if spawner.last_activity:
spawner.last_activity = max(spawner.last_activity, dt)
else:
spawner.last_activity = dt
# FIXME: Make this configurable duration. 30 minutes for now!
if (now - user.last_activity).total_seconds() < 30 * 60:
active_users_count += 1
self.statsd.gauge("users.running", users_count)
self.statsd.gauge("users.active", active_users_count)
self.db.commit()
await self.proxy.check_routes(self.users, self._service_map, routes)
|
https://github.com/jupyterhub/jupyterhub/issues/1626
|
[E 2018-01-19 04:14:19.920 JupyterHub web:1518] Exception in exception handler
Traceback (most recent call last):
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 1516, in _execute
self._handle_request_exception(e)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 1570, in _handle_request_exception
self.send_error(500, exc_info=sys.exc_info())
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 1040, in send_error
self.finish()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/handlers/base.py", line 105, in finish
super().finish(*args, **kwargs)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 993, in finish
self._log()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 1540, in _log
self.application.log_request(self)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 2053, in log_request
self.settings["log_function"](handler)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/log.py", line 90, in log_request
user = handler.get_current_user()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/handlers/base.py", line 229, in get_current_user
return self.get_current_user_cookie()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/handlers/base.py", line 222, in get_current_user_cookie
return self._user_for_cookie(self.hub.cookie_name)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/handlers/base.py", line 206, in _user_for_cookie
u = self.db.query(orm.User).filter(orm.User.cookie_id==cookie_id).first()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 2755, in first
ret = list(self[0:1])
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 2547, in __getitem__
return list(res)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 2855, in __iter__
return self._execute_and_instances(context)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 2878, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 945, in execute
return meth(self, multiparams, params)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/sql/elements.py", line 263, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1053, in _execute_clauseelement
compiled_sql, distilled_params
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1121, in _execute_context
None, None)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1402, in _handle_dbapi_exception
exc_info
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 203, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 186, in reraise
raise value.with_traceback(tb)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1114, in _execute_context
conn = self._revalidate_connection()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 424, in _revalidate_connection
"Can't reconnect until invalid "
sqlalchemy.exc.StatementError: (sqlalchemy.exc.InvalidRequestError) Can't reconnect until invalid transaction is rolled back [SQL: 'SELECT users.id AS users_id, users.name AS users_name, users.`admin` AS users_admin, users.last_activity AS users_last_activity, users.cookie_id AS users_cookie_id, users.state AS users_state, users.encrypted_auth_state AS users_encrypted_auth_state \nFROM users \nWHERE users.cookie_id = %(cookie_id_1)s \n LIMIT %(param_1)s'] [parameters: [{}]]
|
sqlalchemy.exc.StatementError
|
def get_current_user(self):
"""get current username"""
if not hasattr(self, "_jupyterhub_user"):
try:
user = self.get_current_user_token()
if user is None:
user = self.get_current_user_cookie()
self._jupyterhub_user = user
except Exception:
# don't let errors here raise more than once
self._jupyterhub_user = None
raise
return self._jupyterhub_user
|
def get_current_user(self):
"""get current username"""
user = self.get_current_user_token()
if user is not None:
return user
return self.get_current_user_cookie()
|
https://github.com/jupyterhub/jupyterhub/issues/1626
|
[E 2018-01-19 04:14:19.920 JupyterHub web:1518] Exception in exception handler
Traceback (most recent call last):
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 1516, in _execute
self._handle_request_exception(e)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 1570, in _handle_request_exception
self.send_error(500, exc_info=sys.exc_info())
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 1040, in send_error
self.finish()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/handlers/base.py", line 105, in finish
super().finish(*args, **kwargs)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 993, in finish
self._log()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 1540, in _log
self.application.log_request(self)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 2053, in log_request
self.settings["log_function"](handler)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/log.py", line 90, in log_request
user = handler.get_current_user()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/handlers/base.py", line 229, in get_current_user
return self.get_current_user_cookie()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/handlers/base.py", line 222, in get_current_user_cookie
return self._user_for_cookie(self.hub.cookie_name)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/handlers/base.py", line 206, in _user_for_cookie
u = self.db.query(orm.User).filter(orm.User.cookie_id==cookie_id).first()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 2755, in first
ret = list(self[0:1])
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 2547, in __getitem__
return list(res)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 2855, in __iter__
return self._execute_and_instances(context)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 2878, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 945, in execute
return meth(self, multiparams, params)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/sql/elements.py", line 263, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1053, in _execute_clauseelement
compiled_sql, distilled_params
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1121, in _execute_context
None, None)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1402, in _handle_dbapi_exception
exc_info
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 203, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 186, in reraise
raise value.with_traceback(tb)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1114, in _execute_context
conn = self._revalidate_connection()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 424, in _revalidate_connection
"Can't reconnect until invalid "
sqlalchemy.exc.StatementError: (sqlalchemy.exc.InvalidRequestError) Can't reconnect until invalid transaction is rolled back [SQL: 'SELECT users.id AS users_id, users.name AS users_name, users.`admin` AS users_admin, users.last_activity AS users_last_activity, users.cookie_id AS users_cookie_id, users.state AS users_state, users.encrypted_auth_state AS users_encrypted_auth_state \nFROM users \nWHERE users.cookie_id = %(cookie_id_1)s \n LIMIT %(param_1)s'] [parameters: [{}]]
|
sqlalchemy.exc.StatementError
|
def write_error(self, status_code, **kwargs):
"""render custom error pages"""
exc_info = kwargs.get("exc_info")
message = ""
exception = None
status_message = responses.get(status_code, "Unknown HTTP Error")
if exc_info:
exception = exc_info[1]
# get the custom message, if defined
try:
message = exception.log_message % exception.args
except Exception:
pass
# construct the custom reason, if defined
reason = getattr(exception, "reason", "")
if reason:
message = reasons.get(reason, reason)
if exception and isinstance(exception, SQLAlchemyError):
self.log.warning("Rolling back session due to database error %s", exception)
self.db.rollback()
# build template namespace
ns = dict(
status_code=status_code,
status_message=status_message,
message=message,
extra_error_html=getattr(self, "extra_error_html", ""),
exception=exception,
)
self.set_header("Content-Type", "text/html")
# allow setting headers from exceptions
# since exception handler clears headers
headers = getattr(exception, "headers", None)
if headers:
for key, value in headers.items():
self.set_header(key, value)
# render the template
try:
html = self.render_template("%s.html" % status_code, **ns)
except TemplateNotFound:
self.log.debug("No template for %d", status_code)
html = self.render_template("error.html", **ns)
self.write(html)
|
def write_error(self, status_code, **kwargs):
"""render custom error pages"""
exc_info = kwargs.get("exc_info")
message = ""
exception = None
status_message = responses.get(status_code, "Unknown HTTP Error")
if exc_info:
exception = exc_info[1]
# get the custom message, if defined
try:
message = exception.log_message % exception.args
except Exception:
pass
# construct the custom reason, if defined
reason = getattr(exception, "reason", "")
if reason:
message = reasons.get(reason, reason)
# build template namespace
ns = dict(
status_code=status_code,
status_message=status_message,
message=message,
extra_error_html=getattr(self, "extra_error_html", ""),
exception=exception,
)
self.set_header("Content-Type", "text/html")
# allow setting headers from exceptions
# since exception handler clears headers
headers = getattr(exception, "headers", None)
if headers:
for key, value in headers.items():
self.set_header(key, value)
# render the template
try:
html = self.render_template("%s.html" % status_code, **ns)
except TemplateNotFound:
self.log.debug("No template for %d", status_code)
html = self.render_template("error.html", **ns)
self.write(html)
|
https://github.com/jupyterhub/jupyterhub/issues/1626
|
[E 2018-01-19 04:14:19.920 JupyterHub web:1518] Exception in exception handler
Traceback (most recent call last):
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 1516, in _execute
self._handle_request_exception(e)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 1570, in _handle_request_exception
self.send_error(500, exc_info=sys.exc_info())
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 1040, in send_error
self.finish()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/handlers/base.py", line 105, in finish
super().finish(*args, **kwargs)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 993, in finish
self._log()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 1540, in _log
self.application.log_request(self)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 2053, in log_request
self.settings["log_function"](handler)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/log.py", line 90, in log_request
user = handler.get_current_user()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/handlers/base.py", line 229, in get_current_user
return self.get_current_user_cookie()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/handlers/base.py", line 222, in get_current_user_cookie
return self._user_for_cookie(self.hub.cookie_name)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/handlers/base.py", line 206, in _user_for_cookie
u = self.db.query(orm.User).filter(orm.User.cookie_id==cookie_id).first()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 2755, in first
ret = list(self[0:1])
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 2547, in __getitem__
return list(res)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 2855, in __iter__
return self._execute_and_instances(context)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 2878, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 945, in execute
return meth(self, multiparams, params)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/sql/elements.py", line 263, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1053, in _execute_clauseelement
compiled_sql, distilled_params
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1121, in _execute_context
None, None)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1402, in _handle_dbapi_exception
exc_info
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 203, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 186, in reraise
raise value.with_traceback(tb)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1114, in _execute_context
conn = self._revalidate_connection()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 424, in _revalidate_connection
"Can't reconnect until invalid "
sqlalchemy.exc.StatementError: (sqlalchemy.exc.InvalidRequestError) Can't reconnect until invalid transaction is rolled back [SQL: 'SELECT users.id AS users_id, users.name AS users_name, users.`admin` AS users_admin, users.last_activity AS users_last_activity, users.cookie_id AS users_cookie_id, users.state AS users_state, users.encrypted_auth_state AS users_encrypted_auth_state \nFROM users \nWHERE users.cookie_id = %(cookie_id_1)s \n LIMIT %(param_1)s'] [parameters: [{}]]
|
sqlalchemy.exc.StatementError
|
def _notify_deleted_relationships(session, obj):
"""Expire relationships when an object becomes deleted
Needed to keep relationships up to date.
"""
mapper = inspect(obj).mapper
for prop in mapper.relationships:
if prop.back_populates:
_expire_relationship(obj, prop)
|
def _notify_deleted_relationships(session, obj):
"""Expire relationships when an object becomes deleted
Needed for
"""
mapper = inspect(obj).mapper
for prop in mapper.relationships:
if prop.back_populates:
_expire_relationship(obj, prop)
|
https://github.com/jupyterhub/jupyterhub/issues/1626
|
[E 2018-01-19 04:14:19.920 JupyterHub web:1518] Exception in exception handler
Traceback (most recent call last):
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 1516, in _execute
self._handle_request_exception(e)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 1570, in _handle_request_exception
self.send_error(500, exc_info=sys.exc_info())
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 1040, in send_error
self.finish()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/handlers/base.py", line 105, in finish
super().finish(*args, **kwargs)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 993, in finish
self._log()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 1540, in _log
self.application.log_request(self)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 2053, in log_request
self.settings["log_function"](handler)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/log.py", line 90, in log_request
user = handler.get_current_user()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/handlers/base.py", line 229, in get_current_user
return self.get_current_user_cookie()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/handlers/base.py", line 222, in get_current_user_cookie
return self._user_for_cookie(self.hub.cookie_name)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/handlers/base.py", line 206, in _user_for_cookie
u = self.db.query(orm.User).filter(orm.User.cookie_id==cookie_id).first()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 2755, in first
ret = list(self[0:1])
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 2547, in __getitem__
return list(res)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 2855, in __iter__
return self._execute_and_instances(context)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 2878, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 945, in execute
return meth(self, multiparams, params)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/sql/elements.py", line 263, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1053, in _execute_clauseelement
compiled_sql, distilled_params
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1121, in _execute_context
None, None)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1402, in _handle_dbapi_exception
exc_info
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 203, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 186, in reraise
raise value.with_traceback(tb)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1114, in _execute_context
conn = self._revalidate_connection()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 424, in _revalidate_connection
"Can't reconnect until invalid "
sqlalchemy.exc.StatementError: (sqlalchemy.exc.InvalidRequestError) Can't reconnect until invalid transaction is rolled back [SQL: 'SELECT users.id AS users_id, users.name AS users_name, users.`admin` AS users_admin, users.last_activity AS users_last_activity, users.cookie_id AS users_cookie_id, users.state AS users_state, users.encrypted_auth_state AS users_encrypted_auth_state \nFROM users \nWHERE users.cookie_id = %(cookie_id_1)s \n LIMIT %(param_1)s'] [parameters: [{}]]
|
sqlalchemy.exc.StatementError
|
def new_session_factory(
url="sqlite:///:memory:", reset=False, expire_on_commit=False, **kwargs
):
"""Create a new session at url"""
if url.startswith("sqlite"):
kwargs.setdefault("connect_args", {"check_same_thread": False})
listeners = kwargs.setdefault("listeners", [])
listeners.append(ForeignKeysListener())
elif url.startswith("mysql"):
kwargs.setdefault("pool_recycle", 60)
if url.endswith(":memory:"):
# If we're using an in-memory database, ensure that only one connection
# is ever created.
kwargs.setdefault("poolclass", StaticPool)
engine = create_engine(url, **kwargs)
# enable pessimistic disconnect handling
register_ping_connection(engine)
if reset:
Base.metadata.drop_all(engine)
if mysql_large_prefix_check(engine): # if mysql is allows large indexes
add_row_format(Base) # set format on the tables
# check the db revision (will raise, pointing to `upgrade-db` if version doesn't match)
check_db_revision(engine)
Base.metadata.create_all(engine)
# We set expire_on_commit=False, since we don't actually need
# SQLAlchemy to expire objects after commiting - we don't expect
# concurrent runs of the hub talking to the same db. Turning
# this off gives us a major performance boost
session_factory = sessionmaker(
bind=engine,
expire_on_commit=expire_on_commit,
)
return session_factory
|
def new_session_factory(
url="sqlite:///:memory:", reset=False, expire_on_commit=False, **kwargs
):
"""Create a new session at url"""
if url.startswith("sqlite"):
kwargs.setdefault("connect_args", {"check_same_thread": False})
listeners = kwargs.setdefault("listeners", [])
listeners.append(ForeignKeysListener())
elif url.startswith("mysql"):
kwargs.setdefault("pool_recycle", 60)
if url.endswith(":memory:"):
# If we're using an in-memory database, ensure that only one connection
# is ever created.
kwargs.setdefault("poolclass", StaticPool)
engine = create_engine(url, **kwargs)
if reset:
Base.metadata.drop_all(engine)
if mysql_large_prefix_check(engine): # if mysql is allows large indexes
add_row_format(Base) # set format on the tables
# check the db revision (will raise, pointing to `upgrade-db` if version doesn't match)
check_db_revision(engine)
Base.metadata.create_all(engine)
# We set expire_on_commit=False, since we don't actually need
# SQLAlchemy to expire objects after commiting - we don't expect
# concurrent runs of the hub talking to the same db. Turning
# this off gives us a major performance boost
session_factory = sessionmaker(
bind=engine,
expire_on_commit=expire_on_commit,
)
return session_factory
|
https://github.com/jupyterhub/jupyterhub/issues/1626
|
[E 2018-01-19 04:14:19.920 JupyterHub web:1518] Exception in exception handler
Traceback (most recent call last):
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 1516, in _execute
self._handle_request_exception(e)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 1570, in _handle_request_exception
self.send_error(500, exc_info=sys.exc_info())
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 1040, in send_error
self.finish()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/handlers/base.py", line 105, in finish
super().finish(*args, **kwargs)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 993, in finish
self._log()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 1540, in _log
self.application.log_request(self)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 2053, in log_request
self.settings["log_function"](handler)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/log.py", line 90, in log_request
user = handler.get_current_user()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/handlers/base.py", line 229, in get_current_user
return self.get_current_user_cookie()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/handlers/base.py", line 222, in get_current_user_cookie
return self._user_for_cookie(self.hub.cookie_name)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/jupyterhub/handlers/base.py", line 206, in _user_for_cookie
u = self.db.query(orm.User).filter(orm.User.cookie_id==cookie_id).first()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 2755, in first
ret = list(self[0:1])
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 2547, in __getitem__
return list(res)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 2855, in __iter__
return self._execute_and_instances(context)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 2878, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 945, in execute
return meth(self, multiparams, params)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/sql/elements.py", line 263, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1053, in _execute_clauseelement
compiled_sql, distilled_params
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1121, in _execute_context
None, None)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1402, in _handle_dbapi_exception
exc_info
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 203, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 186, in reraise
raise value.with_traceback(tb)
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1114, in _execute_context
conn = self._revalidate_connection()
File "/Users/bhatt/anaconda3/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 424, in _revalidate_connection
"Can't reconnect until invalid "
sqlalchemy.exc.StatementError: (sqlalchemy.exc.InvalidRequestError) Can't reconnect until invalid transaction is rolled back [SQL: 'SELECT users.id AS users_id, users.name AS users_name, users.`admin` AS users_admin, users.last_activity AS users_last_activity, users.cookie_id AS users_cookie_id, users.state AS users_state, users.encrypted_auth_state AS users_encrypted_auth_state \nFROM users \nWHERE users.cookie_id = %(cookie_id_1)s \n LIMIT %(param_1)s'] [parameters: [{}]]
|
sqlalchemy.exc.StatementError
|
def validate(self, obj, value):
"""
Validate that the passed in value is a valid memory specification
It could either be a pure int, when it is taken as a byte value.
If it has one of the suffixes, it is converted into the appropriate
pure byte value.
"""
if isinstance(value, (int, float)):
return int(value)
try:
num = float(value[:-1])
except ValueError:
raise TraitError(
"{val} is not a valid memory specification. Must be an int or a string with suffix K, M, G, T".format(
val=value
)
)
suffix = value[-1]
if suffix not in self.UNIT_SUFFIXES:
raise TraitError(
"{val} is not a valid memory specification. Must be an int or a string with suffix K, M, G, T".format(
val=value
)
)
else:
return int(float(num) * self.UNIT_SUFFIXES[suffix])
|
def validate(self, obj, value):
"""
Validate that the passed in value is a valid memory specification
It could either be a pure int, when it is taken as a byte value.
If it has one of the suffixes, it is converted into the appropriate
pure byte value.
"""
if isinstance(value, int):
return value
num = value[:-1]
suffix = value[-1]
if not num.isdigit() and suffix not in ByteSpecification.UNIT_SUFFIXES:
raise TraitError(
"{val} is not a valid memory specification. Must be an int or a string with suffix K, M, G, T".format(
val=value
)
)
else:
return int(num) * ByteSpecification.UNIT_SUFFIXES[suffix]
|
https://github.com/jupyterhub/jupyterhub/issues/1089
|
[E 2017-04-18 05:39:02.270 JupyterHub app:1527]
Traceback (most recent call last):
File "/usr/local/lib/python3.4/dist-packages/jupyterhub/app.py", line 1524, in launch_instance_async
yield self.initialize(argv)
File "/usr/local/lib/python3.4/dist-packages/jupyterhub/app.py", line 1315, in initialize
yield self.init_spawners()
File "/usr/local/lib/python3.4/dist-packages/jupyterhub/app.py", line 1084, in init_spawners
self.users[orm_user.id] = user = User(orm_user, self.tornado_settings)
File "/usr/local/lib/python3.4/dist-packages/jupyterhub/user.py", line 128, in __init__
config=self.settings.get('config'),
File "/usr/local/lib/python3.4/dist-packages/kubespawner/spawner.py", line 29, in __init__
super().__init__(*args, **kwargs)
File "/usr/local/lib/python3.4/dist-packages/jupyterhub/spawner.py", line 345, in __init__
super(Spawner, self).__init__(**kwargs)
File "/usr/local/lib/python3.4/dist-packages/traitlets/config/configurable.py", line 84, in __init__
self.config = config
File "/usr/local/lib/python3.4/dist-packages/traitlets/traitlets.py", line 585, in __set__
self.set(obj, value)
File "/usr/local/lib/python3.4/dist-packages/traitlets/traitlets.py", line 574, in set
obj._notify_trait(self.name, old_value, new_value)
File "/usr/local/lib/python3.4/dist-packages/traitlets/traitlets.py", line 1139, in _notify_trait
type='change',
File "/usr/local/lib/python3.4/dist-packages/traitlets/traitlets.py", line 1176, in notify_change
c(change)
File "/usr/local/lib/python3.4/dist-packages/traitlets/traitlets.py", line 819, in compatible_observer
return func(self, change)
File "/usr/local/lib/python3.4/dist-packages/traitlets/config/configurable.py", line 186, in _config_changed
self._load_config(change.new, traits=traits, section_names=section_names)
File "/usr/local/lib/python3.4/dist-packages/traitlets/config/configurable.py", line 153, in _load_config
setattr(self, name, deepcopy(config_value))
File "/usr/local/lib/python3.4/dist-packages/traitlets/traitlets.py", line 585, in __set__
self.set(obj, value)
File "/usr/local/lib/python3.4/dist-packages/traitlets/traitlets.py", line 559, in set
new_value = self._validate(obj, value)
File "/usr/local/lib/python3.4/dist-packages/traitlets/traitlets.py", line 591, in _validate
value = self.validate(obj, value)
File "/usr/local/lib/python3.4/dist-packages/jupyterhub/traitlets.py", line 71, in validate
return int(num) * ByteSpecification.UNIT_SUFFIXES[suffix]
ValueError: invalid literal for int() with base 10: '1.5'
|
ValueError
|
def init_spawners(self):
db = self.db
user_summaries = [""]
def _user_summary(user):
parts = ["{0: >8}".format(user.name)]
if user.admin:
parts.append("admin")
for name, spawner in sorted(user.spawners.items(), key=itemgetter(0)):
if spawner.server:
parts.append("%s:%s running at %s" % (user.name, name, spawner.server))
return " ".join(parts)
@gen.coroutine
def user_stopped(user, server_name):
spawner = user.spawners[server_name]
status = yield spawner.poll()
self.log.warning(
"User %s server stopped with exit code: %s",
user.name,
status,
)
yield self.proxy.delete_user(user, server_name)
yield user.stop(server_name)
for orm_user in db.query(orm.User):
self.users[orm_user.id] = user = User(orm_user, self.tornado_settings)
self.log.debug("Loading state for %s from db", user.name)
for name, spawner in user.spawners.items():
status = 0
if spawner.server:
try:
status = yield spawner.poll()
except Exception:
self.log.exception(
"Failed to poll spawner for %s, assuming the spawner is not running.",
spawner._log_name,
)
status = -1
if status is None:
self.log.info("%s still running", user.name)
spawner.add_poll_callback(user_stopped, user, name)
spawner.start_polling()
else:
# user not running. This is expected if server is None,
# but indicates the user's server died while the Hub wasn't running
# if spawner.server is defined.
if spawner.server:
self.log.warning(
"%s appears to have stopped while the Hub was down",
spawner._log_name,
)
# remove server entry from db
db.delete(spawner.orm_spawner.server)
spawner.server = None
else:
self.log.debug("%s not running", spawner._log_name)
db.commit()
user_summaries.append(_user_summary(user))
self.log.debug("Loaded users: %s", "\n".join(user_summaries))
db.commit()
|
def init_spawners(self):
db = self.db
user_summaries = [""]
def _user_summary(user):
parts = ["{0: >8}".format(user.name)]
if user.admin:
parts.append("admin")
for name, spawner in sorted(user.spawners.items(), key=itemgetter(0)):
if spawner.server:
parts.append("%s:%s running at %s" % (user.name, name, spawner.server))
return " ".join(parts)
@gen.coroutine
def user_stopped(user, server_name):
spawner = user.spawners[server_name]
status = yield spawner.poll()
self.log.warning(
"User %s server stopped with exit code: %s",
user.name,
status,
)
yield self.proxy.delete_user(user, server_name)
yield user.stop(server_name)
for orm_user in db.query(orm.User):
self.users[orm_user.id] = user = User(orm_user, self.tornado_settings)
self.log.debug("Loading state for %s from db", user.name)
for name, spawner in user.spawners.items():
status = 0
if spawner.server:
try:
status = yield spawner.poll()
except Exception:
self.log.exception(
"Failed to poll spawner for %s, assuming the spawner is not running.",
user.name if name else "%s|%s" % (user.name, name),
)
status = -1
if status is None:
self.log.info("%s still running", user.name)
spawner.add_poll_callback(user_stopped, user, name)
spawner.start_polling()
else:
# user not running. This is expected if server is None,
# but indicates the user's server died while the Hub wasn't running
# if spawner.server is defined.
log = self.log.warning if spawner.server else self.log.debug
log("%s not running.", user.name)
# remove all server or servers entry from db related to the user
if spawner.server:
db.delete(spawner.orm_spawner.server)
db.commit()
user_summaries.append(_user_summary(user))
self.log.debug("Loaded users: %s", "\n".join(user_summaries))
db.commit()
|
https://github.com/jupyterhub/jupyterhub/issues/1414
|
singleuser.cmd jupyterhub-singleuser
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/app.py", line 1623, in launch_instance_async
yield self.initialize(argv)
File "/usr/lib/python3.5/types.py", line 179, in throw
return self.__wrapped.throw(tp, *rest)
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/app.py", line 1385, in initialize
yield self.init_spawners()
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/app.py", line 1240, in init_spawners
db.commit()
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/orm/session.py", line 906, in commit
self.transaction.commit()
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/orm/session.py", line 461, in commit
self._prepare_impl()
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/orm/session.py", line 441, in _prepare_impl
self.session.flush()
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/orm/session.py", line 2171, in flush
self._flush(objects)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/orm/session.py", line 2291, in _flush
transaction.rollback(_capture_exception=True)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/util/langhelpers.py", line 66, in __exit__
compat.reraise(exc_type, exc_value, exc_tb)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/util/compat.py", line 187, in reraise
raise value
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/orm/session.py", line 2255, in _flush
flush_context.execute()
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/orm/unitofwork.py", line 389, in execute
rec.execute(self)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/orm/unitofwork.py", line 577, in execute
uow
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/orm/persistence.py", line 258, in delete_obj
cached_connections, mapper, table, delete)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/orm/persistence.py", line 931, in _emit_delete_statements
c = connection.execute(statement, del_objects)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 945, in execute
return meth(self, multiparams, params)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/sql/elements.py", line 263, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1053, in _execute_clauseelement
compiled_sql, distilled_params
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1189, in _execute_context
context)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1402, in _handle_dbapi_exception
exc_info
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/util/compat.py", line 203, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/util/compat.py", line 186, in reraise
raise value.with_traceback(tb)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1182, in _execute_context
context)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/default.py", line 470, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.IntegrityError: (psycopg2.IntegrityError) update or delete on table "servers" violates foreign key constraint "spawners_server_id_fkey" on table "spawners"
DETAIL: Key (id)=(24433) is still referenced from table "spawners".
[SQL: 'DELETE FROM servers WHERE servers.id = %(id)s'] [parameters: {'id': 24433}]
|
sqlalchemy.exc.IntegrityError
|
def stop(self):
"""Stop a managed service"""
if not self.managed:
raise RuntimeError("Cannot stop unmanaged service %s" % self)
if self.spawner:
if self.orm.server:
self.db.delete(self.orm.server)
self.db.commit()
self.spawner.stop_polling()
return self.spawner.stop()
|
def stop(self):
"""Stop a managed service"""
if not self.managed:
raise RuntimeError("Cannot stop unmanaged service %s" % self)
if self.spawner:
self.spawner.stop_polling()
return self.spawner.stop()
|
https://github.com/jupyterhub/jupyterhub/issues/1414
|
singleuser.cmd jupyterhub-singleuser
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/app.py", line 1623, in launch_instance_async
yield self.initialize(argv)
File "/usr/lib/python3.5/types.py", line 179, in throw
return self.__wrapped.throw(tp, *rest)
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/app.py", line 1385, in initialize
yield self.init_spawners()
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/app.py", line 1240, in init_spawners
db.commit()
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/orm/session.py", line 906, in commit
self.transaction.commit()
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/orm/session.py", line 461, in commit
self._prepare_impl()
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/orm/session.py", line 441, in _prepare_impl
self.session.flush()
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/orm/session.py", line 2171, in flush
self._flush(objects)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/orm/session.py", line 2291, in _flush
transaction.rollback(_capture_exception=True)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/util/langhelpers.py", line 66, in __exit__
compat.reraise(exc_type, exc_value, exc_tb)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/util/compat.py", line 187, in reraise
raise value
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/orm/session.py", line 2255, in _flush
flush_context.execute()
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/orm/unitofwork.py", line 389, in execute
rec.execute(self)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/orm/unitofwork.py", line 577, in execute
uow
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/orm/persistence.py", line 258, in delete_obj
cached_connections, mapper, table, delete)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/orm/persistence.py", line 931, in _emit_delete_statements
c = connection.execute(statement, del_objects)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 945, in execute
return meth(self, multiparams, params)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/sql/elements.py", line 263, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1053, in _execute_clauseelement
compiled_sql, distilled_params
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1189, in _execute_context
context)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1402, in _handle_dbapi_exception
exc_info
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/util/compat.py", line 203, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/util/compat.py", line 186, in reraise
raise value.with_traceback(tb)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1182, in _execute_context
context)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/default.py", line 470, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.IntegrityError: (psycopg2.IntegrityError) update or delete on table "servers" violates foreign key constraint "spawners_server_id_fkey" on table "spawners"
DETAIL: Key (id)=(24433) is still referenced from table "spawners".
[SQL: 'DELETE FROM servers WHERE servers.id = %(id)s'] [parameters: {'id': 24433}]
|
sqlalchemy.exc.IntegrityError
|
def get(self, token):
orm_token = orm.APIToken.find(self.db, token)
if orm_token is None:
orm_token = orm.OAuthAccessToken.find(self.db, token)
if orm_token is None:
raise web.HTTPError(404)
if orm_token.user:
model = self.user_model(self.users[orm_token.user])
elif orm_token.service:
model = self.service_model(orm_token.service)
else:
self.log.warning("%s has no user or service. Deleting..." % orm_token)
self.db.delete(orm_token)
self.db.commit()
raise web.HTTPError(404)
self.write(json.dumps(model))
|
def get(self, token):
orm_token = orm.APIToken.find(self.db, token)
if orm_token is None:
orm_token = orm.OAuthAccessToken.find(self.db, token)
if orm_token is None:
raise web.HTTPError(404)
if orm_token.user:
model = self.user_model(self.users[orm_token.user])
elif orm_token.service:
model = self.service_model(orm_token.service)
self.write(json.dumps(model))
|
https://github.com/jupyterhub/jupyterhub/issues/1309
|
Traceback (most recent call last):
File "/usr/lib64/python3.4/site-packages/tornado/web.py", line 1509, in _execute
result = method(*self.path_args, **self.path_kwargs)
File "/usr/lib/python3.4/site-packages/jupyterhub/utils.py", line 193, in decorated
return method(self, *args, **kwargs)
File "/usr/lib/python3.4/site-packages/jupyterhub/apihandlers/auth.py", line 27, in get
elif orm_token.service:
AttributeError: 'OAuthAccessToken' object has no attribute 'service'
|
AttributeError
|
def save_token(self, access_token):
"""
Stores an access token in the database.
:param access_token: An instance of :class:`oauth2.datatype.AccessToken`.
"""
user = self.db.query(orm.User).filter(orm.User.id == access_token.user_id).first()
if user is None:
raise ValueError("No user for access token: %s" % access_token.user_id)
orm_access_token = orm.OAuthAccessToken(
generated=True,
client_id=access_token.client_id,
grant_type=access_token.grant_type,
expires_at=access_token.expires_at,
refresh_token=access_token.refresh_token,
refresh_expires_at=access_token.refresh_expires_at,
token=access_token.token,
user=user,
)
self.db.add(orm_access_token)
self.db.commit()
|
def save_token(self, access_token):
"""
Stores an access token in the database.
:param access_token: An instance of :class:`oauth2.datatype.AccessToken`.
"""
user = self.db.query(orm.User).filter(orm.User.id == access_token.user_id).first()
orm_access_token = orm.OAuthAccessToken(
client_id=access_token.client_id,
grant_type=access_token.grant_type,
expires_at=access_token.expires_at,
refresh_token=access_token.refresh_token,
refresh_expires_at=access_token.refresh_expires_at,
token=access_token.token,
user=user,
)
self.db.add(orm_access_token)
self.db.commit()
|
https://github.com/jupyterhub/jupyterhub/issues/1309
|
Traceback (most recent call last):
File "/usr/lib64/python3.4/site-packages/tornado/web.py", line 1509, in _execute
result = method(*self.path_args, **self.path_kwargs)
File "/usr/lib/python3.4/site-packages/jupyterhub/utils.py", line 193, in decorated
return method(self, *args, **kwargs)
File "/usr/lib/python3.4/site-packages/jupyterhub/apihandlers/auth.py", line 27, in get
elif orm_token.service:
AttributeError: 'OAuthAccessToken' object has no attribute 'service'
|
AttributeError
|
def upgrade():
# proxy/table info is no longer in the database
op.drop_table("proxies")
op.drop_table("hubs")
# drop some columns no longer in use
try:
op.drop_column("users", "auth_state")
op.drop_column("users", "_server_id")
except sa.exc.OperationalError:
# this won't be a problem moving forward, but downgrade will fail
if op.get_context().dialect.name == "sqlite":
logger.warning(
"sqlite cannot drop columns. Leaving unused old columns in place."
)
else:
raise
op.add_column("users", sa.Column("encrypted_auth_state", sa.types.LargeBinary))
|
def upgrade():
try:
op.drop_column("users", "auth_state")
except sa.exc.OperationalError as e:
# sqlite3 can't drop columns
warnings.warn("Failed to drop column: %s" % e)
op.add_column("users", sa.Column("encrypted_auth_state", sa.types.LargeBinary))
|
https://github.com/jupyterhub/jupyterhub/issues/1162
|
$ jupyterhub
...
$ jupyterhub upgrade-db
INFO [alembic.runtime.migration] Context impl SQLiteImpl.
INFO [alembic.runtime.migration] Will assume non-transactional DDL.
INFO [alembic.runtime.migration] Running upgrade -> 19c0846f6344, base revision for 0.5
INFO [alembic.runtime.migration] Running upgrade 19c0846f6344 -> eeb276e51423, auth_state
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1182, in _execute_context
context)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/default.py", line 470, in do_execute
cursor.execute(statement, parameters)
sqlite3.OperationalError: duplicate column name: auth_state
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/bin/alembic", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python3.5/dist-packages/alembic/config.py", line 479, in main
CommandLine(prog=prog).main(argv=argv)
File "/usr/local/lib/python3.5/dist-packages/alembic/config.py", line 473, in main
self.run_cmd(cfg, options)
File "/usr/local/lib/python3.5/dist-packages/alembic/config.py", line 456, in run_cmd
**dict((k, getattr(options, k, None)) for k in kwarg)
File "/usr/local/lib/python3.5/dist-packages/alembic/command.py", line 254, in upgrade
script.run_env()
File "/usr/local/lib/python3.5/dist-packages/alembic/script/base.py", line 416, in run_env
util.load_python_file(self.dir, 'env.py')
File "/usr/local/lib/python3.5/dist-packages/alembic/util/pyfiles.py", line 93, in load_python_file
module = load_module_py(module_id, path)
File "/usr/local/lib/python3.5/dist-packages/alembic/util/compat.py", line 64, in load_module_py
module_id, path).load_module(module_id)
File "<frozen importlib._bootstrap_external>", line 388, in _check_name_wrapper
File "<frozen importlib._bootstrap_external>", line 809, in load_module
File "<frozen importlib._bootstrap_external>", line 668, in load_module
File "<frozen importlib._bootstrap>", line 268, in _load_module_shim
File "<frozen importlib._bootstrap>", line 693, in _load
File "<frozen importlib._bootstrap>", line 673, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 665, in exec_module
File "<frozen importlib._bootstrap>", line 222, in _call_with_frames_removed
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/alembic/env.py", line 70, in <module>
run_migrations_online()
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/alembic/env.py", line 65, in run_migrations_online
context.run_migrations()
File "<string>", line 8, in run_migrations
File "/usr/local/lib/python3.5/dist-packages/alembic/runtime/environment.py", line 817, in run_migrations
self.get_context().run_migrations(**kw)
File "/usr/local/lib/python3.5/dist-packages/alembic/runtime/migration.py", line 323, in run_migrations
step.migration_fn(**kw)
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/alembic/versions/eeb276e51423_auth_state.py", line 21, in upgrade
op.add_column('users', sa.Column('auth_state', JSONDict))
File "<string>", line 8, in add_column
File "<string>", line 3, in add_column
File "/usr/local/lib/python3.5/dist-packages/alembic/operations/ops.py", line 1551, in add_column
return operations.invoke(op)
File "/usr/local/lib/python3.5/dist-packages/alembic/operations/base.py", line 318, in invoke
return fn(self, operation)
File "/usr/local/lib/python3.5/dist-packages/alembic/operations/toimpl.py", line 123, in add_column
schema=schema
File "/usr/local/lib/python3.5/dist-packages/alembic/ddl/impl.py", line 172, in add_column
self._exec(base.AddColumn(table_name, column, schema=schema))
File "/usr/local/lib/python3.5/dist-packages/alembic/ddl/impl.py", line 118, in _exec
return conn.execute(construct, *multiparams, **params)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 945, in execute
return meth(self, multiparams, params)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/sql/ddl.py", line 68, in _execute_on_connection
return connection._execute_ddl(self, multiparams, params)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1002, in _execute_ddl
compiled
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1189, in _execute_context
context)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1402, in _handle_dbapi_exception
exc_info
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/util/compat.py", line 203, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/util/compat.py", line 186, in reraise
raise value.with_traceback(tb)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1182, in _execute_context
context)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/default.py", line 470, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) duplicate column name: auth_state [SQL: 'ALTER TABLE users ADD COLUMN auth_state TEXT']
[E 2017-06-02 19:55:16.017 JupyterHub app:1527]
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/app.py", line 1525, in launch_instance_async
yield self.start()
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/app.py", line 1436, in start
self.subapp.start()
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/app.py", line 185, in start
dbutil.upgrade(hub.db_url)
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/dbutil.py", line 80, in upgrade
['alembic', '-c', alembic_ini, 'upgrade', revision]
File "/usr/lib/python3.5/subprocess.py", line 581, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['alembic', '-c', '/tmp/tmpkqvnuq0a/alembic.ini', 'upgrade', 'head']' returned non-zero exit status 1
|
sqlite3.OperationalError
|
def downgrade():
# drop all the new tables
engine = op.get_bind().engine
for table in ("oauth_clients", "oauth_codes", "oauth_access_tokens", "spawners"):
if engine.has_table(table):
op.drop_table(table)
op.drop_column("users", "encrypted_auth_state")
op.add_column("users", sa.Column("auth_state", JSONDict))
op.add_column(
"users", sa.Column("_server_id", sa.Integer, sa.ForeignKey("servers.id"))
)
|
def downgrade():
op.drop_column("users", "encrypted_auth_state")
op.add_column("users", sa.Column("auth_state", JSONDict))
|
https://github.com/jupyterhub/jupyterhub/issues/1162
|
$ jupyterhub
...
$ jupyterhub upgrade-db
INFO [alembic.runtime.migration] Context impl SQLiteImpl.
INFO [alembic.runtime.migration] Will assume non-transactional DDL.
INFO [alembic.runtime.migration] Running upgrade -> 19c0846f6344, base revision for 0.5
INFO [alembic.runtime.migration] Running upgrade 19c0846f6344 -> eeb276e51423, auth_state
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1182, in _execute_context
context)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/default.py", line 470, in do_execute
cursor.execute(statement, parameters)
sqlite3.OperationalError: duplicate column name: auth_state
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/bin/alembic", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python3.5/dist-packages/alembic/config.py", line 479, in main
CommandLine(prog=prog).main(argv=argv)
File "/usr/local/lib/python3.5/dist-packages/alembic/config.py", line 473, in main
self.run_cmd(cfg, options)
File "/usr/local/lib/python3.5/dist-packages/alembic/config.py", line 456, in run_cmd
**dict((k, getattr(options, k, None)) for k in kwarg)
File "/usr/local/lib/python3.5/dist-packages/alembic/command.py", line 254, in upgrade
script.run_env()
File "/usr/local/lib/python3.5/dist-packages/alembic/script/base.py", line 416, in run_env
util.load_python_file(self.dir, 'env.py')
File "/usr/local/lib/python3.5/dist-packages/alembic/util/pyfiles.py", line 93, in load_python_file
module = load_module_py(module_id, path)
File "/usr/local/lib/python3.5/dist-packages/alembic/util/compat.py", line 64, in load_module_py
module_id, path).load_module(module_id)
File "<frozen importlib._bootstrap_external>", line 388, in _check_name_wrapper
File "<frozen importlib._bootstrap_external>", line 809, in load_module
File "<frozen importlib._bootstrap_external>", line 668, in load_module
File "<frozen importlib._bootstrap>", line 268, in _load_module_shim
File "<frozen importlib._bootstrap>", line 693, in _load
File "<frozen importlib._bootstrap>", line 673, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 665, in exec_module
File "<frozen importlib._bootstrap>", line 222, in _call_with_frames_removed
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/alembic/env.py", line 70, in <module>
run_migrations_online()
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/alembic/env.py", line 65, in run_migrations_online
context.run_migrations()
File "<string>", line 8, in run_migrations
File "/usr/local/lib/python3.5/dist-packages/alembic/runtime/environment.py", line 817, in run_migrations
self.get_context().run_migrations(**kw)
File "/usr/local/lib/python3.5/dist-packages/alembic/runtime/migration.py", line 323, in run_migrations
step.migration_fn(**kw)
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/alembic/versions/eeb276e51423_auth_state.py", line 21, in upgrade
op.add_column('users', sa.Column('auth_state', JSONDict))
File "<string>", line 8, in add_column
File "<string>", line 3, in add_column
File "/usr/local/lib/python3.5/dist-packages/alembic/operations/ops.py", line 1551, in add_column
return operations.invoke(op)
File "/usr/local/lib/python3.5/dist-packages/alembic/operations/base.py", line 318, in invoke
return fn(self, operation)
File "/usr/local/lib/python3.5/dist-packages/alembic/operations/toimpl.py", line 123, in add_column
schema=schema
File "/usr/local/lib/python3.5/dist-packages/alembic/ddl/impl.py", line 172, in add_column
self._exec(base.AddColumn(table_name, column, schema=schema))
File "/usr/local/lib/python3.5/dist-packages/alembic/ddl/impl.py", line 118, in _exec
return conn.execute(construct, *multiparams, **params)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 945, in execute
return meth(self, multiparams, params)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/sql/ddl.py", line 68, in _execute_on_connection
return connection._execute_ddl(self, multiparams, params)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1002, in _execute_ddl
compiled
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1189, in _execute_context
context)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1402, in _handle_dbapi_exception
exc_info
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/util/compat.py", line 203, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/util/compat.py", line 186, in reraise
raise value.with_traceback(tb)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1182, in _execute_context
context)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/default.py", line 470, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) duplicate column name: auth_state [SQL: 'ALTER TABLE users ADD COLUMN auth_state TEXT']
[E 2017-06-02 19:55:16.017 JupyterHub app:1527]
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/app.py", line 1525, in launch_instance_async
yield self.start()
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/app.py", line 1436, in start
self.subapp.start()
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/app.py", line 185, in start
dbutil.upgrade(hub.db_url)
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/dbutil.py", line 80, in upgrade
['alembic', '-c', alembic_ini, 'upgrade', revision]
File "/usr/lib/python3.5/subprocess.py", line 581, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['alembic', '-c', '/tmp/tmpkqvnuq0a/alembic.ini', 'upgrade', 'head']' returned non-zero exit status 1
|
sqlite3.OperationalError
|
def start(self):
hub = JupyterHub(parent=self)
hub.load_config_file(hub.config_file)
self.log = hub.log
if hub.db_url.startswith("sqlite:///"):
db_file = hub.db_url.split(":///", 1)[1]
self._backup_db_file(db_file)
self.log.info("Upgrading %s", hub.db_url)
dbutil.upgrade(hub.db_url)
|
def start(self):
hub = JupyterHub(parent=self)
hub.load_config_file(hub.config_file)
if hub.db_url.startswith("sqlite:///"):
db_file = hub.db_url.split(":///", 1)[1]
self._backup_db_file(db_file)
self.log.info("Upgrading %s", hub.db_url)
dbutil.upgrade(hub.db_url)
|
https://github.com/jupyterhub/jupyterhub/issues/1162
|
$ jupyterhub
...
$ jupyterhub upgrade-db
INFO [alembic.runtime.migration] Context impl SQLiteImpl.
INFO [alembic.runtime.migration] Will assume non-transactional DDL.
INFO [alembic.runtime.migration] Running upgrade -> 19c0846f6344, base revision for 0.5
INFO [alembic.runtime.migration] Running upgrade 19c0846f6344 -> eeb276e51423, auth_state
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1182, in _execute_context
context)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/default.py", line 470, in do_execute
cursor.execute(statement, parameters)
sqlite3.OperationalError: duplicate column name: auth_state
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/bin/alembic", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python3.5/dist-packages/alembic/config.py", line 479, in main
CommandLine(prog=prog).main(argv=argv)
File "/usr/local/lib/python3.5/dist-packages/alembic/config.py", line 473, in main
self.run_cmd(cfg, options)
File "/usr/local/lib/python3.5/dist-packages/alembic/config.py", line 456, in run_cmd
**dict((k, getattr(options, k, None)) for k in kwarg)
File "/usr/local/lib/python3.5/dist-packages/alembic/command.py", line 254, in upgrade
script.run_env()
File "/usr/local/lib/python3.5/dist-packages/alembic/script/base.py", line 416, in run_env
util.load_python_file(self.dir, 'env.py')
File "/usr/local/lib/python3.5/dist-packages/alembic/util/pyfiles.py", line 93, in load_python_file
module = load_module_py(module_id, path)
File "/usr/local/lib/python3.5/dist-packages/alembic/util/compat.py", line 64, in load_module_py
module_id, path).load_module(module_id)
File "<frozen importlib._bootstrap_external>", line 388, in _check_name_wrapper
File "<frozen importlib._bootstrap_external>", line 809, in load_module
File "<frozen importlib._bootstrap_external>", line 668, in load_module
File "<frozen importlib._bootstrap>", line 268, in _load_module_shim
File "<frozen importlib._bootstrap>", line 693, in _load
File "<frozen importlib._bootstrap>", line 673, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 665, in exec_module
File "<frozen importlib._bootstrap>", line 222, in _call_with_frames_removed
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/alembic/env.py", line 70, in <module>
run_migrations_online()
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/alembic/env.py", line 65, in run_migrations_online
context.run_migrations()
File "<string>", line 8, in run_migrations
File "/usr/local/lib/python3.5/dist-packages/alembic/runtime/environment.py", line 817, in run_migrations
self.get_context().run_migrations(**kw)
File "/usr/local/lib/python3.5/dist-packages/alembic/runtime/migration.py", line 323, in run_migrations
step.migration_fn(**kw)
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/alembic/versions/eeb276e51423_auth_state.py", line 21, in upgrade
op.add_column('users', sa.Column('auth_state', JSONDict))
File "<string>", line 8, in add_column
File "<string>", line 3, in add_column
File "/usr/local/lib/python3.5/dist-packages/alembic/operations/ops.py", line 1551, in add_column
return operations.invoke(op)
File "/usr/local/lib/python3.5/dist-packages/alembic/operations/base.py", line 318, in invoke
return fn(self, operation)
File "/usr/local/lib/python3.5/dist-packages/alembic/operations/toimpl.py", line 123, in add_column
schema=schema
File "/usr/local/lib/python3.5/dist-packages/alembic/ddl/impl.py", line 172, in add_column
self._exec(base.AddColumn(table_name, column, schema=schema))
File "/usr/local/lib/python3.5/dist-packages/alembic/ddl/impl.py", line 118, in _exec
return conn.execute(construct, *multiparams, **params)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 945, in execute
return meth(self, multiparams, params)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/sql/ddl.py", line 68, in _execute_on_connection
return connection._execute_ddl(self, multiparams, params)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1002, in _execute_ddl
compiled
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1189, in _execute_context
context)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1402, in _handle_dbapi_exception
exc_info
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/util/compat.py", line 203, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/util/compat.py", line 186, in reraise
raise value.with_traceback(tb)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1182, in _execute_context
context)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/default.py", line 470, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) duplicate column name: auth_state [SQL: 'ALTER TABLE users ADD COLUMN auth_state TEXT']
[E 2017-06-02 19:55:16.017 JupyterHub app:1527]
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/app.py", line 1525, in launch_instance_async
yield self.start()
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/app.py", line 1436, in start
self.subapp.start()
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/app.py", line 185, in start
dbutil.upgrade(hub.db_url)
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/dbutil.py", line 80, in upgrade
['alembic', '-c', alembic_ini, 'upgrade', revision]
File "/usr/lib/python3.5/subprocess.py", line 581, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['alembic', '-c', '/tmp/tmpkqvnuq0a/alembic.ini', 'upgrade', 'head']' returned non-zero exit status 1
|
sqlite3.OperationalError
|
def init_db(self):
"""Create the database connection"""
self.log.debug("Connecting to db: %s", self.db_url)
try:
self.session_factory = orm.new_session_factory(
self.db_url, reset=self.reset_db, echo=self.debug_db, **self.db_kwargs
)
# trigger constructing thread local db property
self.db = scoped_session(self.session_factory)()
except OperationalError as e:
self.log.error("Failed to connect to db: %s", self.db_url)
self.log.debug("Database error was:", exc_info=True)
if self.db_url.startswith("sqlite:///"):
self._check_db_path(self.db_url.split(":///", 1)[1])
self.log.critical(
"\n".join(
[
"If you recently upgraded JupyterHub, try running",
" jupyterhub upgrade-db",
"to upgrade your JupyterHub database schema",
]
)
)
self.exit(1)
except orm.DatabaseSchemaMismatch as e:
self.exit(e)
|
def init_db(self):
"""Create the database connection"""
self.log.debug("Connecting to db: %s", self.db_url)
try:
self.session_factory = orm.new_session_factory(
self.db_url, reset=self.reset_db, echo=self.debug_db, **self.db_kwargs
)
# trigger constructing thread local db property
self.db = scoped_session(self.session_factory)()
except OperationalError as e:
self.log.error("Failed to connect to db: %s", self.db_url)
self.log.debug("Database error was:", exc_info=True)
if self.db_url.startswith("sqlite:///"):
self._check_db_path(self.db_url.split(":///", 1)[1])
self.log.critical(
"\n".join(
[
"If you recently upgraded JupyterHub, try running",
" jupyterhub upgrade-db",
"to upgrade your JupyterHub database schema",
]
)
)
self.exit(1)
|
https://github.com/jupyterhub/jupyterhub/issues/1162
|
$ jupyterhub
...
$ jupyterhub upgrade-db
INFO [alembic.runtime.migration] Context impl SQLiteImpl.
INFO [alembic.runtime.migration] Will assume non-transactional DDL.
INFO [alembic.runtime.migration] Running upgrade -> 19c0846f6344, base revision for 0.5
INFO [alembic.runtime.migration] Running upgrade 19c0846f6344 -> eeb276e51423, auth_state
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1182, in _execute_context
context)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/default.py", line 470, in do_execute
cursor.execute(statement, parameters)
sqlite3.OperationalError: duplicate column name: auth_state
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/bin/alembic", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python3.5/dist-packages/alembic/config.py", line 479, in main
CommandLine(prog=prog).main(argv=argv)
File "/usr/local/lib/python3.5/dist-packages/alembic/config.py", line 473, in main
self.run_cmd(cfg, options)
File "/usr/local/lib/python3.5/dist-packages/alembic/config.py", line 456, in run_cmd
**dict((k, getattr(options, k, None)) for k in kwarg)
File "/usr/local/lib/python3.5/dist-packages/alembic/command.py", line 254, in upgrade
script.run_env()
File "/usr/local/lib/python3.5/dist-packages/alembic/script/base.py", line 416, in run_env
util.load_python_file(self.dir, 'env.py')
File "/usr/local/lib/python3.5/dist-packages/alembic/util/pyfiles.py", line 93, in load_python_file
module = load_module_py(module_id, path)
File "/usr/local/lib/python3.5/dist-packages/alembic/util/compat.py", line 64, in load_module_py
module_id, path).load_module(module_id)
File "<frozen importlib._bootstrap_external>", line 388, in _check_name_wrapper
File "<frozen importlib._bootstrap_external>", line 809, in load_module
File "<frozen importlib._bootstrap_external>", line 668, in load_module
File "<frozen importlib._bootstrap>", line 268, in _load_module_shim
File "<frozen importlib._bootstrap>", line 693, in _load
File "<frozen importlib._bootstrap>", line 673, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 665, in exec_module
File "<frozen importlib._bootstrap>", line 222, in _call_with_frames_removed
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/alembic/env.py", line 70, in <module>
run_migrations_online()
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/alembic/env.py", line 65, in run_migrations_online
context.run_migrations()
File "<string>", line 8, in run_migrations
File "/usr/local/lib/python3.5/dist-packages/alembic/runtime/environment.py", line 817, in run_migrations
self.get_context().run_migrations(**kw)
File "/usr/local/lib/python3.5/dist-packages/alembic/runtime/migration.py", line 323, in run_migrations
step.migration_fn(**kw)
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/alembic/versions/eeb276e51423_auth_state.py", line 21, in upgrade
op.add_column('users', sa.Column('auth_state', JSONDict))
File "<string>", line 8, in add_column
File "<string>", line 3, in add_column
File "/usr/local/lib/python3.5/dist-packages/alembic/operations/ops.py", line 1551, in add_column
return operations.invoke(op)
File "/usr/local/lib/python3.5/dist-packages/alembic/operations/base.py", line 318, in invoke
return fn(self, operation)
File "/usr/local/lib/python3.5/dist-packages/alembic/operations/toimpl.py", line 123, in add_column
schema=schema
File "/usr/local/lib/python3.5/dist-packages/alembic/ddl/impl.py", line 172, in add_column
self._exec(base.AddColumn(table_name, column, schema=schema))
File "/usr/local/lib/python3.5/dist-packages/alembic/ddl/impl.py", line 118, in _exec
return conn.execute(construct, *multiparams, **params)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 945, in execute
return meth(self, multiparams, params)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/sql/ddl.py", line 68, in _execute_on_connection
return connection._execute_ddl(self, multiparams, params)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1002, in _execute_ddl
compiled
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1189, in _execute_context
context)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1402, in _handle_dbapi_exception
exc_info
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/util/compat.py", line 203, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/util/compat.py", line 186, in reraise
raise value.with_traceback(tb)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1182, in _execute_context
context)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/default.py", line 470, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) duplicate column name: auth_state [SQL: 'ALTER TABLE users ADD COLUMN auth_state TEXT']
[E 2017-06-02 19:55:16.017 JupyterHub app:1527]
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/app.py", line 1525, in launch_instance_async
yield self.start()
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/app.py", line 1436, in start
self.subapp.start()
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/app.py", line 185, in start
dbutil.upgrade(hub.db_url)
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/dbutil.py", line 80, in upgrade
['alembic', '-c', alembic_ini, 'upgrade', revision]
File "/usr/lib/python3.5/subprocess.py", line 581, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['alembic', '-c', '/tmp/tmpkqvnuq0a/alembic.ini', 'upgrade', 'head']' returned non-zero exit status 1
|
sqlite3.OperationalError
|
def new_session_factory(url="sqlite:///:memory:", reset=False, **kwargs):
"""Create a new session at url"""
if url.startswith("sqlite"):
kwargs.setdefault("connect_args", {"check_same_thread": False})
elif url.startswith("mysql"):
kwargs.setdefault("pool_recycle", 60)
if url.endswith(":memory:"):
# If we're using an in-memory database, ensure that only one connection
# is ever created.
kwargs.setdefault("poolclass", StaticPool)
engine = create_engine(url, **kwargs)
if reset:
Base.metadata.drop_all(engine)
# check the db revision (will raise, pointing to `upgrade-db` if version doesn't match)
check_db_revision(engine)
Base.metadata.create_all(engine)
session_factory = sessionmaker(bind=engine)
return session_factory
|
def new_session_factory(url="sqlite:///:memory:", reset=False, **kwargs):
"""Create a new session at url"""
if url.startswith("sqlite"):
kwargs.setdefault("connect_args", {"check_same_thread": False})
elif url.startswith("mysql"):
kwargs.setdefault("pool_recycle", 60)
if url.endswith(":memory:"):
# If we're using an in-memory database, ensure that only one connection
# is ever created.
kwargs.setdefault("poolclass", StaticPool)
engine = create_engine(url, **kwargs)
if reset:
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
session_factory = sessionmaker(bind=engine)
return session_factory
|
https://github.com/jupyterhub/jupyterhub/issues/1162
|
$ jupyterhub
...
$ jupyterhub upgrade-db
INFO [alembic.runtime.migration] Context impl SQLiteImpl.
INFO [alembic.runtime.migration] Will assume non-transactional DDL.
INFO [alembic.runtime.migration] Running upgrade -> 19c0846f6344, base revision for 0.5
INFO [alembic.runtime.migration] Running upgrade 19c0846f6344 -> eeb276e51423, auth_state
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1182, in _execute_context
context)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/default.py", line 470, in do_execute
cursor.execute(statement, parameters)
sqlite3.OperationalError: duplicate column name: auth_state
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/bin/alembic", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python3.5/dist-packages/alembic/config.py", line 479, in main
CommandLine(prog=prog).main(argv=argv)
File "/usr/local/lib/python3.5/dist-packages/alembic/config.py", line 473, in main
self.run_cmd(cfg, options)
File "/usr/local/lib/python3.5/dist-packages/alembic/config.py", line 456, in run_cmd
**dict((k, getattr(options, k, None)) for k in kwarg)
File "/usr/local/lib/python3.5/dist-packages/alembic/command.py", line 254, in upgrade
script.run_env()
File "/usr/local/lib/python3.5/dist-packages/alembic/script/base.py", line 416, in run_env
util.load_python_file(self.dir, 'env.py')
File "/usr/local/lib/python3.5/dist-packages/alembic/util/pyfiles.py", line 93, in load_python_file
module = load_module_py(module_id, path)
File "/usr/local/lib/python3.5/dist-packages/alembic/util/compat.py", line 64, in load_module_py
module_id, path).load_module(module_id)
File "<frozen importlib._bootstrap_external>", line 388, in _check_name_wrapper
File "<frozen importlib._bootstrap_external>", line 809, in load_module
File "<frozen importlib._bootstrap_external>", line 668, in load_module
File "<frozen importlib._bootstrap>", line 268, in _load_module_shim
File "<frozen importlib._bootstrap>", line 693, in _load
File "<frozen importlib._bootstrap>", line 673, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 665, in exec_module
File "<frozen importlib._bootstrap>", line 222, in _call_with_frames_removed
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/alembic/env.py", line 70, in <module>
run_migrations_online()
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/alembic/env.py", line 65, in run_migrations_online
context.run_migrations()
File "<string>", line 8, in run_migrations
File "/usr/local/lib/python3.5/dist-packages/alembic/runtime/environment.py", line 817, in run_migrations
self.get_context().run_migrations(**kw)
File "/usr/local/lib/python3.5/dist-packages/alembic/runtime/migration.py", line 323, in run_migrations
step.migration_fn(**kw)
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/alembic/versions/eeb276e51423_auth_state.py", line 21, in upgrade
op.add_column('users', sa.Column('auth_state', JSONDict))
File "<string>", line 8, in add_column
File "<string>", line 3, in add_column
File "/usr/local/lib/python3.5/dist-packages/alembic/operations/ops.py", line 1551, in add_column
return operations.invoke(op)
File "/usr/local/lib/python3.5/dist-packages/alembic/operations/base.py", line 318, in invoke
return fn(self, operation)
File "/usr/local/lib/python3.5/dist-packages/alembic/operations/toimpl.py", line 123, in add_column
schema=schema
File "/usr/local/lib/python3.5/dist-packages/alembic/ddl/impl.py", line 172, in add_column
self._exec(base.AddColumn(table_name, column, schema=schema))
File "/usr/local/lib/python3.5/dist-packages/alembic/ddl/impl.py", line 118, in _exec
return conn.execute(construct, *multiparams, **params)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 945, in execute
return meth(self, multiparams, params)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/sql/ddl.py", line 68, in _execute_on_connection
return connection._execute_ddl(self, multiparams, params)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1002, in _execute_ddl
compiled
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1189, in _execute_context
context)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1402, in _handle_dbapi_exception
exc_info
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/util/compat.py", line 203, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/util/compat.py", line 186, in reraise
raise value.with_traceback(tb)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/base.py", line 1182, in _execute_context
context)
File "/usr/local/lib/python3.5/dist-packages/sqlalchemy/engine/default.py", line 470, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) duplicate column name: auth_state [SQL: 'ALTER TABLE users ADD COLUMN auth_state TEXT']
[E 2017-06-02 19:55:16.017 JupyterHub app:1527]
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/app.py", line 1525, in launch_instance_async
yield self.start()
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/app.py", line 1436, in start
self.subapp.start()
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/app.py", line 185, in start
dbutil.upgrade(hub.db_url)
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/dbutil.py", line 80, in upgrade
['alembic', '-c', alembic_ini, 'upgrade', revision]
File "/usr/lib/python3.5/subprocess.py", line 581, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['alembic', '-c', '/tmp/tmpkqvnuq0a/alembic.ini', 'upgrade', 'head']' returned non-zero exit status 1
|
sqlite3.OperationalError
|
def start(self):
"""Start a managed service"""
if not self.managed:
raise RuntimeError("Cannot start unmanaged service %s" % self)
self.log.info("Starting service %r: %r", self.name, self.command)
env = {}
env.update(self.environment)
env["JUPYTERHUB_SERVICE_NAME"] = self.name
env["JUPYTERHUB_API_TOKEN"] = self.api_token
env["JUPYTERHUB_API_URL"] = self.hub_api_url
env["JUPYTERHUB_BASE_URL"] = self.base_url
if self.url:
env["JUPYTERHUB_SERVICE_URL"] = self.url
env["JUPYTERHUB_SERVICE_PREFIX"] = self.server.base_url
self.spawner = _ServiceSpawner(
cmd=self.command,
environment=env,
api_token=self.api_token,
cwd=self.cwd,
user=_MockUser(
name=self.user,
service=self,
server=self.orm.server,
),
)
self.spawner.start()
self.proc = self.spawner.proc
self.spawner.add_poll_callback(self._proc_stopped)
self.spawner.start_polling()
|
def start(self):
"""Start a managed service"""
if not self.managed:
raise RuntimeError("Cannot start unmanaged service %s" % self)
self.log.info("Starting service %r: %r", self.name, self.command)
env = {}
env.update(self.environment)
env["JUPYTERHUB_SERVICE_NAME"] = self.name
env["JUPYTERHUB_API_TOKEN"] = self.api_token
env["JUPYTERHUB_API_URL"] = self.hub_api_url
env["JUPYTERHUB_BASE_URL"] = self.base_url
env["JUPYTERHUB_SERVICE_PREFIX"] = self.server.base_url
env["JUPYTERHUB_SERVICE_URL"] = self.url
self.spawner = _ServiceSpawner(
cmd=self.command,
environment=env,
api_token=self.api_token,
cwd=self.cwd,
user=_MockUser(
name=self.user,
service=self,
server=self.orm.server,
),
)
self.spawner.start()
self.proc = self.spawner.proc
self.spawner.add_poll_callback(self._proc_stopped)
self.spawner.start_polling()
|
https://github.com/jupyterhub/jupyterhub/issues/765
|
[C 2016-09-20 02:28:37.026 JupyterHub app:1444] Failed to start service cull-idle
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/app.py", line 1442, in start
yield service.start()
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/services/service.py", line 228, in start
env['JUPYTERHUB_SERVICE_PREFIX'] = self.server.base_url
AttributeError: 'NoneType' object has no attribute 'base_url'
|
AttributeError
|
def get_init_detail(self, params=None, user=None):
"""
At the end of the initialization we return the certificate and the
PKCS12 file, if the private key exists.
"""
response_detail = TokenClass.get_init_detail(self, params, user)
params = params or {}
certificate = self.get_tokeninfo("certificate")
response_detail["certificate"] = certificate
privatekey = self.get_tokeninfo("privatekey")
# If there is a private key, we dump a PKCS12
if privatekey:
response_detail["pkcs12"] = b64encode_and_unicode(self._create_pkcs12_bin())
return response_detail
|
def get_init_detail(self, params=None, user=None):
"""
At the end of the initialization we return the certificate and the
PKCS12 file, if the private key exists.
"""
response_detail = TokenClass.get_init_detail(self, params, user)
params = params or {}
certificate = self.get_tokeninfo("certificate")
response_detail["certificate"] = certificate
privatekey = self.get_tokeninfo("privatekey")
# If there is a private key, we dump a PKCS12
if privatekey:
response_detail["pkcs12"] = base64.b64encode(self._create_pkcs12_bin())
return response_detail
|
https://github.com/privacyidea/privacyidea/issues/1799
|
[2019-08-19 16:06:52,849] ERROR in app: Exception on /token/init [POST]
Traceback (most recent call last):
File "/home/fred/privacyidea/privacyidea/venv3/lib/python3.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/home/fred/privacyidea/privacyidea/venv3/lib/python3.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/fred/privacyidea/privacyidea/venv3/lib/python3.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/fred/privacyidea/privacyidea/venv3/lib/python3.7/site-packages/flask/_compat.py", line 35, in reraise
raise value
File "/home/fred/privacyidea/privacyidea/venv3/lib/python3.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/home/fred/privacyidea/privacyidea/venv3/lib/python3.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/fred/privacyidea/privacyidea/privacyidea/api/lib/prepolicy.py", line 129, in policy_wrapper
return wrapped_function(*args, **kwds)
File "/home/fred/privacyidea/privacyidea/privacyidea/api/lib/prepolicy.py", line 129, in policy_wrapper
return wrapped_function(*args, **kwds)
File "/home/fred/privacyidea/privacyidea/privacyidea/api/lib/prepolicy.py", line 129, in policy_wrapper
return wrapped_function(*args, **kwds)
[Previous line repeated 15 more times]
File "/home/fred/privacyidea/privacyidea/privacyidea/api/lib/postpolicy.py", line 102, in policy_wrapper
response = wrapped_function(*args, **kwds)
File "/home/fred/privacyidea/privacyidea/privacyidea/lib/subscriptions.py", line 333, in check_subscription_wrapper
f_result = func(*args, **kwds)
File "/home/fred/privacyidea/privacyidea/privacyidea/lib/event.py", line 99, in event_wrapper
f_result = func(*args, **kwds)
File "/home/fred/privacyidea/privacyidea/privacyidea/lib/log.py", line 154, in log_wrapper
return func(*args, **kwds)
File "/home/fred/privacyidea/privacyidea/privacyidea/api/token.py", line 291, in init
return send_result(True, details=response_details)
File "/home/fred/privacyidea/privacyidea/privacyidea/api/lib/utils.py", line 104, in send_result
return jsonify(prepare_result(obj, rid, details))
File "/home/fred/privacyidea/privacyidea/venv3/lib/python3.7/site-packages/flask/json/__init__.py", line 321, in jsonify
dumps(data, indent=indent, separators=separators) + '\n',
File "/home/fred/privacyidea/privacyidea/venv3/lib/python3.7/site-packages/flask/json/__init__.py", line 179, in dumps
rv = _json.dumps(obj, **kwargs)
File "/usr/lib64/python3.7/json/__init__.py", line 238, in dumps
**kw).encode(obj)
File "/usr/lib64/python3.7/json/encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/usr/lib64/python3.7/json/encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "/home/fred/privacyidea/privacyidea/venv3/lib/python3.7/site-packages/flask/json/__init__.py", line 81, in default
return _json.JSONEncoder.default(self, o)
File "/usr/lib64/python3.7/json/encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type bytes is not JSON serializable
|
TypeError
|
def get_as_dict(self):
"""
This returns the token data as a dictionary.
It is used to display the token list at /token/list.
The certificate token can add the PKCS12 file if it exists
:return: The token data as dict
:rtype: dict
"""
# first get the database values as dict
token_dict = self.token.get()
if "privatekey" in token_dict.get("info"):
token_dict["info"]["pkcs12"] = b64encode_and_unicode(self._create_pkcs12_bin())
return token_dict
|
def get_as_dict(self):
"""
This returns the token data as a dictionary.
It is used to display the token list at /token/list.
The certificate token can add the PKCS12 file if it exists
:return: The token data as dict
:rtype: dict
"""
# first get the database values as dict
token_dict = self.token.get()
if "privatekey" in token_dict.get("info"):
token_dict["info"]["pkcs12"] = base64.b64encode(self._create_pkcs12_bin())
# del(token_dict["privatekey"])
return token_dict
|
https://github.com/privacyidea/privacyidea/issues/1799
|
[2019-08-19 16:06:52,849] ERROR in app: Exception on /token/init [POST]
Traceback (most recent call last):
File "/home/fred/privacyidea/privacyidea/venv3/lib/python3.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/home/fred/privacyidea/privacyidea/venv3/lib/python3.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/fred/privacyidea/privacyidea/venv3/lib/python3.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/fred/privacyidea/privacyidea/venv3/lib/python3.7/site-packages/flask/_compat.py", line 35, in reraise
raise value
File "/home/fred/privacyidea/privacyidea/venv3/lib/python3.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/home/fred/privacyidea/privacyidea/venv3/lib/python3.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/fred/privacyidea/privacyidea/privacyidea/api/lib/prepolicy.py", line 129, in policy_wrapper
return wrapped_function(*args, **kwds)
File "/home/fred/privacyidea/privacyidea/privacyidea/api/lib/prepolicy.py", line 129, in policy_wrapper
return wrapped_function(*args, **kwds)
File "/home/fred/privacyidea/privacyidea/privacyidea/api/lib/prepolicy.py", line 129, in policy_wrapper
return wrapped_function(*args, **kwds)
[Previous line repeated 15 more times]
File "/home/fred/privacyidea/privacyidea/privacyidea/api/lib/postpolicy.py", line 102, in policy_wrapper
response = wrapped_function(*args, **kwds)
File "/home/fred/privacyidea/privacyidea/privacyidea/lib/subscriptions.py", line 333, in check_subscription_wrapper
f_result = func(*args, **kwds)
File "/home/fred/privacyidea/privacyidea/privacyidea/lib/event.py", line 99, in event_wrapper
f_result = func(*args, **kwds)
File "/home/fred/privacyidea/privacyidea/privacyidea/lib/log.py", line 154, in log_wrapper
return func(*args, **kwds)
File "/home/fred/privacyidea/privacyidea/privacyidea/api/token.py", line 291, in init
return send_result(True, details=response_details)
File "/home/fred/privacyidea/privacyidea/privacyidea/api/lib/utils.py", line 104, in send_result
return jsonify(prepare_result(obj, rid, details))
File "/home/fred/privacyidea/privacyidea/venv3/lib/python3.7/site-packages/flask/json/__init__.py", line 321, in jsonify
dumps(data, indent=indent, separators=separators) + '\n',
File "/home/fred/privacyidea/privacyidea/venv3/lib/python3.7/site-packages/flask/json/__init__.py", line 179, in dumps
rv = _json.dumps(obj, **kwargs)
File "/usr/lib64/python3.7/json/__init__.py", line 238, in dumps
**kw).encode(obj)
File "/usr/lib64/python3.7/json/encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/usr/lib64/python3.7/json/encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "/home/fred/privacyidea/privacyidea/venv3/lib/python3.7/site-packages/flask/json/__init__.py", line 81, in default
return _json.JSONEncoder.default(self, o)
File "/usr/lib64/python3.7/json/encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type bytes is not JSON serializable
|
TypeError
|
def upgrade():
try:
op.create_table(
"tokenowner",
sa.Column("id", sa.Integer()),
sa.Column("token_id", sa.Integer(), nullable=True),
sa.Column("resolver", sa.Unicode(length=120), nullable=True),
sa.Column("user_id", sa.Unicode(length=320), nullable=True),
sa.Column("realm_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["realm_id"],
["realm.id"],
),
sa.ForeignKeyConstraint(
["token_id"],
["token.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
op.f("ix_tokenowner_resolver"), "tokenowner", ["resolver"], unique=False
)
op.create_index(
op.f("ix_tokenowner_user_id"), "tokenowner", ["user_id"], unique=False
)
except Exception as exx:
print("Can not create table 'tokenowner'. It probably already exists")
print(exx)
try:
bind = op.get_bind()
session = orm.Session(bind=bind)
# For each token, that has an owner, create a tokenowner entry
for token in session.query(Token).filter(Token.user_id):
token_realms = (
session.query(TokenRealm).filter(TokenRealm.token_id == token.id).all()
)
realm_id = None
if not token_realms:
sys.stderr.write(
"{serial!s}, {userid!s}, {resolver!s}, "
"Error while migrating token assignment. "
"This token has no realm assignments!".format(
serial=token.serial,
userid=token.user_id,
resolver=token.resolver,
)
)
elif len(token_realms) == 1:
realm_id = token_realms[0].realm_id
elif len(token_realms) > 1:
# If the resolver is only contained in one realm, we fetch the realms:
reso_realms = (
session.query(ResolverRealm)
.filter(ResolverRealm.resolver == token.resolver)
.all()
)
if not reso_realms:
sys.stderr.write(
"{serial!s}, {userid!s}, {resolver!s}, "
"The token is assigned, but the assigned resolver is not "
"contained in any realm!".format(
serial=token.serial,
userid=token.user_id,
resolver=token.resolver,
)
)
elif len(reso_realms) == 1:
# The resolver is only in one realm, so this is the new realm of the token!
realm_id = reso_realms[0].realm_id
elif len(reso_realms) > 1:
# The resolver is contained in two realms, we have to apply more logic between the realms in which
# the resolver is contained and the realms, to which the token is assigend.
found_realm_ids = []
for token_realm in token_realms:
if token_realm.realm_id in [r.realm_id for r in reso_realms]:
# The token realm, that also fits the resolver_realm is used as owner realm
found_realm_ids.append(realm_id)
if len(found_realm_ids) > 1:
sys.stderr.write(
"{serial!s}, {userid!s}, {resolver!s}, "
"Your realm configuration for the token is not distinct!. "
"The tokenowner could be in multiple realms! "
"The token is assigned to the following realms and the resolver is also "
"contained in these realm IDs: {realms!s}.".format(
serial=token.serial,
userid=token.user_id,
resolver=token.resolver,
realms=found_realm_ids,
)
)
elif len(found_realm_ids) == 1:
realm_id = found_realm_ids[0]
else:
sys.stderr.write(
"{serial!s}, {userid!s}, {resolver!s}, "
"Can not assign token. The resolver is not contained in any "
"realms, to which the token is assigned!"
)
to = TokenOwner(
token_id=token.id,
user_id=token.user_id,
resolver=token.resolver,
realm_id=realm_id,
)
session.add(to)
session.commit()
# Now we drop the columns
op.drop_column("token", "user_id")
op.drop_column("token", "resolver")
op.drop_column("token", "resolver_type")
except Exception as exx:
session.rollback()
print("Failed to migrate token assignment data!")
print(exx)
|
def upgrade():
try:
op.create_table(
"tokenowner",
sa.Column("id", sa.Integer()),
sa.Column("token_id", sa.Integer(), nullable=True),
sa.Column("resolver", sa.Unicode(length=120), nullable=True),
sa.Column("user_id", sa.Unicode(length=320), nullable=True),
sa.Column("realm_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["realm_id"],
["realm.id"],
),
sa.ForeignKeyConstraint(
["token_id"],
["token.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
op.f("ix_tokenowner_resolver"), "tokenowner", ["resolver"], unique=False
)
op.create_index(
op.f("ix_tokenowner_user_id"), "tokenowner", ["user_id"], unique=False
)
except Exception as exx:
print("Can not create table 'tokenowner'. It probably already exists")
print(exx)
try:
bind = op.get_bind()
session = orm.Session(bind=bind)
# For each token, that has an owner, create a tokenowner entry
for token in session.query(Token).filter(Token.user_id):
token_realms = TokenRealm.query.filter(
TokenRealm.token_id == token.id
).all()
realm_id = None
if not token_realms:
sys.stderr.write(
"{serial!s}, {userid!s}, {resolver!s}, "
"Error while migrating token assignment. "
"This token has no realm assignments!".format(
serial=token.serial,
userid=token.user_id,
resolver=token.resolver,
)
)
elif len(token_realms) == 1:
realm_id = token_realms[0].realm_id
elif len(token_realms) > 1:
# If the resolver is only contained in one realm, we fetch the realms:
reso_realms = ResolverRealm.query.filter(
ResolverRealm.resolver == token.resolver
).all()
if not reso_realms:
sys.stderr.write(
"{serial!s}, {userid!s}, {resolver!s}, "
"The token is assigned, but the assigned resolver is not "
"contained in any realm!".format(
serial=token.serial,
userid=token.user_id,
resolver=token.resolver,
)
)
elif len(reso_realms) == 1:
# The resolver is only in one realm, so this is the new realm of the token!
realm_id = reso_realms[0].realm_id
elif len(reso_realms) > 1:
# The resolver is contained in two realms, we have to apply more logic between the realms in which
# the resolver is contained and the realms, to which the token is assigend.
found_realm_ids = []
for token_realm in token_realms:
if token_realm.realm_id in [r.realm_id for r in reso_realms]:
# The token realm, that also fits the resolver_realm is used as owner realm
found_realm_ids.append(realm_id)
if len(found_realm_ids) > 1:
sys.stderr.write(
"{serial!s}, {userid!s}, {resolver!s}, "
"Your realm configuration for the token is not distinct!. "
"The tokenowner could be in multiple realms! "
"The token is assigned to the following realms and the resolver is also "
"contained in these realm IDs: {realms!s}.".format(
serial=token.serial,
userid=token.user_id,
resolver=token.resolver,
realms=found_realm_ids,
)
)
elif len(found_realm_ids) == 1:
realm_id = found_realm_ids[0]
else:
sys.stderr.write(
"{serial!s}, {userid!s}, {resolver!s}, "
"Can not assign token. The resolver is not contained in any "
"realms, to which the token is assigned!"
)
to = TokenOwner(
token_id=token.id,
user_id=token.user_id,
resolver=token.resolver,
realm_id=realm_id,
)
session.add(to)
session.commit()
# Now we drop the columns
op.drop_column("token", "user_id")
op.drop_column("token", "resolver")
op.drop_column("token", "resolver_type")
except Exception as exx:
session.rollback()
print("Failed to migrate token assignment data!")
print(exx)
|
https://github.com/privacyidea/privacyidea/issues/1461
|
ERROR [sqlalchemy.pool.QueuePool] Exception during reset or similar
Traceback (most recent call last):
File "/opt/venv/lib/python2.7/site-packages/sqlalchemy/pool.py", line 742, in _finalize_fairy
fairy._reset(pool)
File "/opt/venv/lib/python2.7/site-packages/sqlalchemy/pool.py", line 927, in _reset
self._reset_agent.rollback()
File "/opt/venv/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 1697, in rollback
self._do_rollback()
File "/opt/venv/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 1735, in _do_rollback
self.connection._rollback_impl()
File "/opt/venv/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 733, in _rollback_impl
self._handle_dbapi_exception(e, None, None, None, None)
File "/opt/venv/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 1458, in _handle_dbapi_exception
util.raise_from_cause(sqlalchemy_exception, exc_info)
File "/opt/venv/lib/python2.7/site-packages/sqlalchemy/util/compat.py", line 296, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/opt/venv/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 731, in _rollback_impl
self.engine.dialect.do_rollback(self.connection)
File "/opt/venv/lib/python2.7/site-packages/sqlalchemy/dialects/mysql/base.py", line 2148, in do_rollback
dbapi_connection.rollback()
File "/opt/venv/lib/python2.7/site-packages/pymysql/connections.py", line 809, in rollback
self._read_ok_packet()
File "/opt/venv/lib/python2.7/site-packages/pymysql/connections.py", line 773, in _read_ok_packet
pkt = self._read_packet()
File "/opt/venv/lib/python2.7/site-packages/pymysql/connections.py", line 1032, in _read_packet
packet_header = self._read_bytes(4)
File "/opt/venv/lib/python2.7/site-packages/pymysql/connections.py", line 1078, in _read_bytes
CR.CR_SERVER_LOST, "Lost connection to MySQL server during query")
OperationalError: (pymysql.err.OperationalError) (2013, 'Lost connection to MySQL server during query') (Background on this error at: http://sqlalche.me/e/e3q8)
|
OperationalError
|
def reload_from_db(self):
"""
Read the timestamp from the database. If the timestamp is newer than
the internal timestamp, then read the complete data
:return:
"""
if (
not self.timestamp
or self.timestamp
+ datetime.timedelta(
seconds=current_app.config.get("PI_CHECK_RELOAD_CONFIG", 0)
)
< datetime.datetime.now()
):
db_ts = Config.query.filter_by(Key=PRIVACYIDEA_TIMESTAMP).first()
if reload_db(self.timestamp, db_ts):
self.config = {}
self.resolver = {}
self.realm = {}
self.default_realm = None
for sysconf in Config.query.all():
self.config[sysconf.Key] = {
"Value": sysconf.Value,
"Type": sysconf.Type,
"Description": sysconf.Description,
}
for resolver in Resolver.query.all():
resolverdef = {"type": resolver.rtype, "resolvername": resolver.name}
data = {}
for rconf in resolver.config_list:
if rconf.Type == "password":
value = decryptPassword(rconf.Value, convert_unicode=True)
else:
value = rconf.Value
data[rconf.Key] = value
resolverdef["data"] = data
self.resolver[resolver.name] = resolverdef
for realm in Realm.query.all():
if realm.default:
self.default_realm = realm.name
realmdef = {
"option": realm.option,
"default": realm.default,
"resolver": [],
}
for x in realm.resolver_list:
realmdef["resolver"].append(
{
"priority": x.priority,
"name": x.resolver.name,
"type": x.resolver.rtype,
}
)
self.realm[realm.name] = realmdef
self.timestamp = datetime.datetime.now()
|
def reload_from_db(self):
"""
Read the timestamp from the database. If the timestamp is newer than
the internal timestamp, then read the complete data
:return:
"""
if (
not self.timestamp
or self.timestamp
+ datetime.timedelta(
seconds=current_app.config.get("PI_CHECK_RELOAD_CONFIG", 0)
)
< datetime.datetime.now()
):
db_ts = Config.query.filter_by(Key=PRIVACYIDEA_TIMESTAMP).first()
if reload_db(self.timestamp, db_ts):
self.config = {}
self.resolver = {}
self.realm = {}
self.default_realm = None
for sysconf in Config.query.all():
self.config[sysconf.Key] = {
"Value": sysconf.Value,
"Type": sysconf.Type,
"Description": sysconf.Description,
}
for resolver in Resolver.query.all():
resolverdef = {"type": resolver.rtype, "resolvername": resolver.name}
data = {}
for rconf in resolver.config_list:
if rconf.Type == "password":
value = decryptPassword(rconf.Value)
else:
value = rconf.Value
data[rconf.Key] = value
resolverdef["data"] = data
self.resolver[resolver.name] = resolverdef
for realm in Realm.query.all():
if realm.default:
self.default_realm = realm.name
realmdef = {
"option": realm.option,
"default": realm.default,
"resolver": [],
}
for x in realm.resolver_list:
realmdef["resolver"].append(
{
"priority": x.priority,
"name": x.resolver.name,
"type": x.resolver.rtype,
}
)
self.realm[realm.name] = realmdef
self.timestamp = datetime.datetime.now()
|
https://github.com/privacyidea/privacyidea/issues/1181
|
[2018-08-14 14:49:53,123] ERROR in app: Exception on /system/documentation [GET]
Traceback (most recent call last):
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/fred/privacyidea/privacyidea/privacyidea/api/lib/prepolicy.py", line 120, in policy_wrapper
return wrapped_function(*args, **kwds)
File "/home/fred/privacyidea/privacyidea/privacyidea/api/auth.py", line 316, in decorated_function
return f(*args, **kwargs)
File "/home/fred/privacyidea/privacyidea/privacyidea/api/system.py", line 113, in get_config_documentation
context=context))
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/templating.py", line 135, in render_template
context, ctx.app)
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/templating.py", line 117, in _render
rv = template.render(context)
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/jinja2/environment.py", line 1008, in render
return self.environment.handle_exception(exc_info, True)
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/jinja2/environment.py", line 780, in handle_exception
reraise(exc_type, exc_value, tb)
File "/home/fred/privacyidea/privacyidea/privacyidea/static/templates/documentation.rst", line 56, in top-level template code
{{k}}: **{{v}}**
UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 8: ordinal not in range(128)
|
UnicodeDecodeError
|
def get_config(self, key=None, default=None, role="admin", return_bool=False):
"""
:param key: A key to retrieve
:type key: string
:param default: The default value, if it does not exist in the database
:param role: The role which wants to retrieve the system config. Can be
"admin" or "public". If "public", only values with type="public"
are returned.
:type role: string
:param return_bool: If the a boolean value should be returned. Returns
True if value is "True", "true", 1, "1", True...
:return: If key is None, then a dictionary is returned. If a certain key
is given a string/bool is returned.
"""
default_true_keys = [
SYSCONF.PREPENDPIN,
SYSCONF.SPLITATSIGN,
SYSCONF.INCFAILCOUNTER,
SYSCONF.RETURNSAML,
]
r_config = {}
# reduce the dictionary to only public keys!
reduced_config = {}
for ckey, cvalue in self.config.iteritems():
if role == "admin" or cvalue.get("Type") == "public":
reduced_config[ckey] = self.config[ckey]
if not reduced_config and role == "admin":
reduced_config = self.config
for ckey, cvalue in reduced_config.iteritems():
if cvalue.get("Type") == "password":
# decrypt the password
r_config[ckey] = decryptPassword(cvalue.get("Value"), convert_unicode=True)
else:
r_config[ckey] = cvalue.get("Value")
for t_key in default_true_keys:
if t_key not in r_config:
r_config[t_key] = "True"
if key:
# We only return a single key
r_config = r_config.get(key, default)
if return_bool:
if isinstance(r_config, bool):
pass
if isinstance(r_config, int):
r_config = r_config > 0
if isinstance(r_config, basestring):
r_config = is_true(r_config.lower())
return r_config
|
def get_config(self, key=None, default=None, role="admin", return_bool=False):
"""
:param key: A key to retrieve
:type key: string
:param default: The default value, if it does not exist in the database
:param role: The role which wants to retrieve the system config. Can be
"admin" or "public". If "public", only values with type="public"
are returned.
:type role: string
:param return_bool: If the a boolean value should be returned. Returns
True if value is "True", "true", 1, "1", True...
:return: If key is None, then a dictionary is returned. If a certain key
is given a string/bool is returned.
"""
default_true_keys = [
SYSCONF.PREPENDPIN,
SYSCONF.SPLITATSIGN,
SYSCONF.INCFAILCOUNTER,
SYSCONF.RETURNSAML,
]
r_config = {}
# reduce the dictionary to only public keys!
reduced_config = {}
for ckey, cvalue in self.config.iteritems():
if role == "admin" or cvalue.get("Type") == "public":
reduced_config[ckey] = self.config[ckey]
if not reduced_config and role == "admin":
reduced_config = self.config
for ckey, cvalue in reduced_config.iteritems():
if cvalue.get("Type") == "password":
# decrypt the password
r_config[ckey] = decryptPassword(cvalue.get("Value"))
else:
r_config[ckey] = cvalue.get("Value")
for t_key in default_true_keys:
if t_key not in r_config:
r_config[t_key] = "True"
if key:
# We only return a single key
r_config = r_config.get(key, default)
if return_bool:
if isinstance(r_config, bool):
pass
if isinstance(r_config, int):
r_config = r_config > 0
if isinstance(r_config, basestring):
r_config = is_true(r_config.lower())
return r_config
|
https://github.com/privacyidea/privacyidea/issues/1181
|
[2018-08-14 14:49:53,123] ERROR in app: Exception on /system/documentation [GET]
Traceback (most recent call last):
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/fred/privacyidea/privacyidea/privacyidea/api/lib/prepolicy.py", line 120, in policy_wrapper
return wrapped_function(*args, **kwds)
File "/home/fred/privacyidea/privacyidea/privacyidea/api/auth.py", line 316, in decorated_function
return f(*args, **kwargs)
File "/home/fred/privacyidea/privacyidea/privacyidea/api/system.py", line 113, in get_config_documentation
context=context))
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/templating.py", line 135, in render_template
context, ctx.app)
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/templating.py", line 117, in _render
rv = template.render(context)
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/jinja2/environment.py", line 1008, in render
return self.environment.handle_exception(exc_info, True)
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/jinja2/environment.py", line 780, in handle_exception
reraise(exc_type, exc_value, tb)
File "/home/fred/privacyidea/privacyidea/privacyidea/static/templates/documentation.rst", line 56, in top-level template code
{{k}}: **{{v}}**
UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 8: ordinal not in range(128)
|
UnicodeDecodeError
|
def decryptPassword(cryptPass, convert_unicode=False):
"""
Decrypt the encrypted password ``cryptPass`` and return it.
If an error occurs during decryption, return FAILED_TO_DECRYPT_PASSWORD.
:param cryptPass: bytestring
:param convert_unicode: If true, interpret the decrypted password as an UTF-8 string
and convert it to unicode. If an error occurs here,
the original bytestring is returned.
"""
# NOTE: Why do we have the ``convert_unicode`` parameter?
# Up until now, this always returned bytestrings. However, this breaks
# LDAP and SQL resolvers, which expect this to return an unicode string
# (and this makes more sense, because ``encryptPassword`` also
# takes unicode strings!). But always returning unicode might break
# other call sites of ``decryptPassword``. So we add the
# keyword argument to avoid breaking compatibility.
from privacyidea.lib.utils import to_unicode
hsm = _get_hsm()
try:
ret = hsm.decrypt_password(cryptPass)
except Exception as exx: # pragma: no cover
log.warning(exx)
ret = FAILED_TO_DECRYPT_PASSWORD
try:
if convert_unicode:
ret = to_unicode(ret)
except Exception as exx: # pragma: no cover
log.warning(exx)
# just keep ``ret`` as a bytestring in that case
return ret
|
def decryptPassword(cryptPass):
hsm = _get_hsm()
try:
ret = hsm.decrypt_password(cryptPass)
except Exception as exx: # pragma: no cover
log.warning(exx)
ret = FAILED_TO_DECRYPT_PASSWORD
return ret
|
https://github.com/privacyidea/privacyidea/issues/1181
|
[2018-08-14 14:49:53,123] ERROR in app: Exception on /system/documentation [GET]
Traceback (most recent call last):
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/fred/privacyidea/privacyidea/privacyidea/api/lib/prepolicy.py", line 120, in policy_wrapper
return wrapped_function(*args, **kwds)
File "/home/fred/privacyidea/privacyidea/privacyidea/api/auth.py", line 316, in decorated_function
return f(*args, **kwargs)
File "/home/fred/privacyidea/privacyidea/privacyidea/api/system.py", line 113, in get_config_documentation
context=context))
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/templating.py", line 135, in render_template
context, ctx.app)
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/templating.py", line 117, in _render
rv = template.render(context)
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/jinja2/environment.py", line 1008, in render
return self.environment.handle_exception(exc_info, True)
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/jinja2/environment.py", line 780, in handle_exception
reraise(exc_type, exc_value, tb)
File "/home/fred/privacyidea/privacyidea/privacyidea/static/templates/documentation.rst", line 56, in top-level template code
{{k}}: **{{v}}**
UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 8: ordinal not in range(128)
|
UnicodeDecodeError
|
def _create_engine(self):
log.info(
"using the connect string {0!s}".format(
censor_connect_string(self.connect_string)
)
)
try:
log.debug(
"using pool_size={0!s}, pool_timeout={1!s}, pool_recycle={2!s}".format(
self.pool_size, self.pool_timeout, self.pool_recycle
)
)
engine = create_engine(
self.connect_string,
encoding=self.encoding,
convert_unicode=False,
pool_size=self.pool_size,
pool_recycle=self.pool_recycle,
pool_timeout=self.pool_timeout,
)
except TypeError:
# The DB Engine/Poolclass might not support the pool_size.
log.debug("connecting without pool_size.")
engine = create_engine(
self.connect_string, encoding=self.encoding, convert_unicode=False
)
return engine
|
def _create_engine(self):
log.info(
"using the connect string {0!s}".format(
censor_connect_string(self.connect_string)
)
)
try:
log.debug(
"using pool_size={0!s}, pool_timeout={1!s}, pool_recycle={2!s}".format(
self.pool_size, self.pool_timeout, self.pool_recycle
)
)
engine = create_engine(
self.connect_string,
encoding=self.encoding,
convert_unicode=False,
pool_size=self.pool_size,
pool_recycle=self.pool_recycle,
pool_timeout=self.pool_timeout,
)
except TypeError:
# The DB Engine/Poolclass might not support the pool_size.
log.debug("connecting without pool_size.")
engine = create_engine(
self.connect_string, encoding=self.encoding, convert_unicode=False
)
return engine
|
https://github.com/privacyidea/privacyidea/issues/1181
|
[2018-08-14 14:49:53,123] ERROR in app: Exception on /system/documentation [GET]
Traceback (most recent call last):
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/fred/privacyidea/privacyidea/privacyidea/api/lib/prepolicy.py", line 120, in policy_wrapper
return wrapped_function(*args, **kwds)
File "/home/fred/privacyidea/privacyidea/privacyidea/api/auth.py", line 316, in decorated_function
return f(*args, **kwargs)
File "/home/fred/privacyidea/privacyidea/privacyidea/api/system.py", line 113, in get_config_documentation
context=context))
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/templating.py", line 135, in render_template
context, ctx.app)
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/templating.py", line 117, in _render
rv = template.render(context)
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/jinja2/environment.py", line 1008, in render
return self.environment.handle_exception(exc_info, True)
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/jinja2/environment.py", line 780, in handle_exception
reraise(exc_type, exc_value, tb)
File "/home/fred/privacyidea/privacyidea/privacyidea/static/templates/documentation.rst", line 56, in top-level template code
{{k}}: **{{v}}**
UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 8: ordinal not in range(128)
|
UnicodeDecodeError
|
def _create_connect_string(param):
"""
create the connectstring
Port, Password, conParams, Driver, User,
Server, Database
"""
port = ""
password = ""
conParams = ""
if param.get("Port"):
port = ":{0!s}".format(param.get("Port"))
if param.get("Password"):
password = ":{0!s}".format(param.get("Password"))
if param.get("conParams"):
conParams = "?{0!s}".format(param.get("conParams"))
connect_string = "{0!s}://{1!s}{2!s}{3!s}{4!s}{5!s}/{6!s}{7!s}".format(
param.get("Driver", ""),
param.get("User", ""),
password,
"@" if (param.get("User") or password) else "",
param.get("Server", ""),
port,
param.get("Database", ""),
conParams,
)
# SQLAlchemy does not like a unicode connect string!
if param.get("Driver").lower() == "sqlite":
connect_string = str(connect_string)
return connect_string
|
def _create_connect_string(param):
"""
create the connectstring
Port, Password, conParams, Driver, User,
Server, Database
"""
port = ""
password = ""
conParams = ""
if param.get("Port"):
port = ":{0!s}".format(param.get("Port"))
if param.get("Password"):
password = ":{0!s}".format(param.get("Password"))
if param.get("conParams"):
conParams = "?{0!s}".format(param.get("conParams"))
connect_string = "{0!s}://{1!s}{2!s}{3!s}{4!s}{5!s}/{6!s}{7!s}".format(
param.get("Driver", ""),
param.get("User", ""),
password,
"@" if (param.get("User") or password) else "",
param.get("Server", ""),
port,
param.get("Database", ""),
conParams,
)
# SQLAlchemy does not like a unicode connect string!
if param.get("Driver").lower() == "sqlite":
connect_string = str(connect_string)
return connect_string
|
https://github.com/privacyidea/privacyidea/issues/1181
|
[2018-08-14 14:49:53,123] ERROR in app: Exception on /system/documentation [GET]
Traceback (most recent call last):
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/fred/privacyidea/privacyidea/privacyidea/api/lib/prepolicy.py", line 120, in policy_wrapper
return wrapped_function(*args, **kwds)
File "/home/fred/privacyidea/privacyidea/privacyidea/api/auth.py", line 316, in decorated_function
return f(*args, **kwargs)
File "/home/fred/privacyidea/privacyidea/privacyidea/api/system.py", line 113, in get_config_documentation
context=context))
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/templating.py", line 135, in render_template
context, ctx.app)
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/flask/templating.py", line 117, in _render
rv = template.render(context)
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/jinja2/environment.py", line 1008, in render
return self.environment.handle_exception(exc_info, True)
File "/home/fred/privacyidea/privacyidea/venv/lib/python2.7/site-packages/jinja2/environment.py", line 780, in handle_exception
reraise(exc_type, exc_value, tb)
File "/home/fred/privacyidea/privacyidea/privacyidea/static/templates/documentation.rst", line 56, in top-level template code
{{k}}: **{{v}}**
UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 8: ordinal not in range(128)
|
UnicodeDecodeError
|
def getUserInfo(self, userId):
"""
This function returns all user info for a given userid/object.
:param userId: The userid of the object
:type userId: string
:return: A dictionary with the keys defined in self.userinfo
:rtype: dict
"""
ret = {}
self._bind()
if self.uidtype.lower() == "dn":
# encode utf8, so that also german ulauts work in the DN
self.l.search(
search_base=to_utf8(userId),
search_scope=self.scope,
search_filter="(&" + self.searchfilter + ")",
attributes=self.userinfo.values(),
)
else:
if self.uidtype == "objectGUID":
userId = uuid.UUID("{%s}" % userId).bytes_le
userId = escape_bytes(userId)
filter = "(&%s(%s=%s))" % (self.searchfilter, self.uidtype, userId)
self.l.search(
search_base=self.basedn,
search_scope=self.scope,
search_filter=filter,
attributes=self.userinfo.values(),
)
r = self.l.response
r = self._trim_result(r)
if len(r) > 1: # pragma: no cover
raise Exception("Found more than one object for uid %r" % userId)
for entry in r:
attributes = entry.get("attributes")
ret = self._ldap_attributes_to_user_object(attributes)
return ret
|
def getUserInfo(self, userId):
"""
This function returns all user info for a given userid/object.
:param userId: The userid of the object
:type userId: string
:return: A dictionary with the keys defined in self.userinfo
:rtype: dict
"""
ret = {}
self._bind()
if self.uidtype.lower() == "dn":
# encode utf8, so that also german ulauts work in the DN
self.l.search(
search_base=to_utf8(userId),
search_scope=self.scope,
search_filter="(&" + self.searchfilter + ")",
attributes=self.userinfo.values(),
)
else:
filter = "(&%s(%s=%s))" % (self.searchfilter, self.uidtype, userId)
self.l.search(
search_base=self.basedn,
search_scope=self.scope,
search_filter=filter,
attributes=self.userinfo.values(),
)
r = self.l.response
r = self._trim_result(r)
if len(r) > 1: # pragma: no cover
raise Exception("Found more than one object for uid %r" % userId)
for entry in r:
attributes = entry.get("attributes")
ret = self._ldap_attributes_to_user_object(attributes)
return ret
|
https://github.com/privacyidea/privacyidea/issues/385
|
Found this userlist: [...list of users...]
Found this userlist: [...list of users...]
Exiting get_user_list with result [...list of users...]
Exiting get_user_list with result [...list of users...]
Exception on /user/ [GET]
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/flask/app.py", line 1817, in wsgi_app
response = self.full_dispatch_request()
File "/usr/lib/python2.7/dist-packages/flask/app.py", line 1477, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/lib/python2.7/dist-packages/flask/app.py", line 1381, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/lib/python2.7/dist-packages/flask/app.py", line 1475, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/lib/python2.7/dist-packages/flask/app.py", line 1461, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/lib/python2.7/dist-packages/privacyidea/api/lib/prepolicy.py", line 96, in policy_wrapper
return wrapped_function(*args, **kwds)
File "/usr/lib/python2.7/dist-packages/privacyidea/api/auth.py", line 302, in decorated_function
return f(*args, **kwargs)
File "/usr/lib/python2.7/dist-packages/privacyidea/api/user.py", line 104, in get_users
return send_result(users)
File "/usr/lib/python2.7/dist-packages/privacyidea/api/lib/utils.py", line 122, in send_result
return jsonify(res)
File "/usr/lib/python2.7/dist-packages/flask/json.py", line 238, in jsonify
indent=indent),
File "/usr/lib/python2.7/dist-packages/flask/json.py", line 126, in dumps
rv = _json.dumps(obj, **kwargs)
File "/usr/lib/python2.7/dist-packages/simplejson/__init__.py", line 369, in dumps
**kw).encode(obj)
File "/usr/lib/python2.7/dist-packages/simplejson/encoder.py", line 264, in encode
chunks = list(chunks)
File "/usr/lib/python2.7/dist-packages/simplejson/encoder.py", line 612, in _iterencode
for chunk in _iterencode_dict(o, _current_indent_level):
File "/usr/lib/python2.7/dist-packages/simplejson/encoder.py", line 568, in _iterencode_dict
for chunk in chunks:
File "/usr/lib/python2.7/dist-packages/simplejson/encoder.py", line 568, in _iterencode_dict
for chunk in chunks:
File "/usr/lib/python2.7/dist-packages/simplejson/encoder.py", line 455, in _iterencode_list
for chunk in chunks:
File "/usr/lib/python2.7/dist-packages/simplejson/encoder.py", line 535, in _iterencode_dict
yield _encoder(value)
UnicodeDecodeError: 'utf8' codec can't decode byte 0xd6 in position 0: invalid continuation byte
Exception on /user/ [GET]
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/flask/app.py", line 1817, in wsgi_app
response = self.full_dispatch_request()
File "/usr/lib/python2.7/dist-packages/flask/app.py", line 1477, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/lib/python2.7/dist-packages/flask/app.py", line 1381, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/lib/python2.7/dist-packages/flask/app.py", line 1475, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/lib/python2.7/dist-packages/flask/app.py", line 1461, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/lib/python2.7/dist-packages/privacyidea/api/lib/prepolicy.py", line 96, in policy_wrapper
return wrapped_function(*args, **kwds)
File "/usr/lib/python2.7/dist-packages/privacyidea/api/auth.py", line 302, in decorated_function
return f(*args, **kwargs)
File "/usr/lib/python2.7/dist-packages/privacyidea/api/user.py", line 104, in get_users
return send_result(users)
File "/usr/lib/python2.7/dist-packages/privacyidea/api/lib/utils.py", line 122, in send_result
return jsonify(res)
File "/usr/lib/python2.7/dist-packages/flask/json.py", line 238, in jsonify
indent=indent),
File "/usr/lib/python2.7/dist-packages/flask/json.py", line 126, in dumps
rv = _json.dumps(obj, **kwargs)
File "/usr/lib/python2.7/dist-packages/simplejson/__init__.py", line 369, in dumps
**kw).encode(obj)
File "/usr/lib/python2.7/dist-packages/simplejson/encoder.py", line 264, in encode
chunks = list(chunks)
File "/usr/lib/python2.7/dist-packages/simplejson/encoder.py", line 612, in _iterencode
for chunk in _iterencode_dict(o, _current_indent_level):
File "/usr/lib/python2.7/dist-packages/simplejson/encoder.py", line 568, in _iterencode_dict
for chunk in chunks:
File "/usr/lib/python2.7/dist-packages/simplejson/encoder.py", line 568, in _iterencode_dict
for chunk in chunks:
File "/usr/lib/python2.7/dist-packages/simplejson/encoder.py", line 455, in _iterencode_list
for chunk in chunks:
File "/usr/lib/python2.7/dist-packages/simplejson/encoder.py", line 535, in _iterencode_dict
yield _encoder(value)
UnicodeDecodeError: 'utf8' codec can't decode byte 0xd6 in position 0: invalid continuation byte
Can not get param: No JSON object could be decoded
Can not get param: No JSON object could be decoded
|
UnicodeDecodeError
|
def decryptPassword(cryptPass):
hsm = _get_hsm()
try:
ret = hsm.decrypt_password(cryptPass)
except Exception as exx: # pragma: no cover
log.warning(exx)
ret = FAILED_TO_DECRYPT_PASSWORD
return ret
|
def decryptPassword(cryptPass):
hsm = _get_hsm()
try:
ret = hsm.decrypt_password(cryptPass)
except Exception as exx: # pragma: no cover
log.warning(exx)
ret = "FAILED TO DECRYPT PASSWORD!"
return ret
|
https://github.com/privacyidea/privacyidea/issues/350
|
SMTPAuthenticationError: (535, '5.7.8 Error: authentication failed:')
[2016-03-28 19:55:39,479][30338][140453494273792][WARNING][privacyidea.lib.crypto:326] 'NoneType' object has no attribute 'find'
[2016-03-28 19:55:41,528][30338][140453494273792][ERROR][privacyidea.app:1423] Exception on /smtpserver/send_test_email [POST]
Traceback (most recent call last):
File "/opt/privacyidea/venv/local/lib/python2.7/site-packages/flask/app.py", line 1817, in wsgi_app
response = self.full_dispatch_request()
File "/opt/privacyidea/venv/local/lib/python2.7/site-packages/flask/app.py", line 1477, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/opt/privacyidea/venv/local/lib/python2.7/site-packages/flask/app.py", line 1381, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/opt/privacyidea/venv/local/lib/python2.7/site-packages/flask/app.py", line 1475, in full_dispatch_request
rv = self.dispatch_request()
File "/opt/privacyidea/venv/local/lib/python2.7/site-packages/flask/app.py", line 1461, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/opt/privacyidea/privacyidea/api/lib/prepolicy.py", line 96, in policy_wrapper
return wrapped_function(*args, **kwds)
File "/opt/privacyidea/privacyidea/lib/log.py", line 125, in log_wrapper
f_result = func(*args, **kwds)
File "/opt/privacyidea/privacyidea/api/smtpserver.py", line 146, in test
"The configuration %s is working." % identifier)
File "/opt/privacyidea/privacyidea/lib/smtpserver.py", line 93, in test_email
mail.login(config.username, password)
File "/usr/lib/python2.7/smtplib.py", line 613, in login
raise SMTPAuthenticationError(code, resp)
|
SMTPAuthenticationError
|
def _display_as_base(cls):
"""
A decorator that makes an exception class look like its base.
We use this to hide subclasses that are implementation details - the user
should catch the base type, which is what the traceback will show them.
Classes decorated with this decorator are subject to removal without a
deprecation warning.
"""
assert issubclass(cls, Exception)
cls.__name__ = cls.__base__.__name__
return cls
|
def _display_as_base(cls):
"""
A decorator that makes an exception class look like its base.
We use this to hide subclasses that are implementation details - the user
should catch the base type, which is what the traceback will show them.
Classes decorated with this decorator are subject to removal without a
deprecation warning.
"""
assert issubclass(cls, Exception)
cls.__name__ = cls.__base__.__name__
cls.__qualname__ = cls.__base__.__qualname__
set_module(cls.__base__.__module__)(cls)
return cls
|
https://github.com/numpy/numpy/issues/16490
|
Traceback (most recent call last):
File "ufunctypeerror.py", line 6, in <module>
numpy.ones(1) * "foo"
numpy.core._exceptions.UFuncTypeError: ufunc 'multiply' did not contain a loop with signature matching types (dtype('<U32'), dtype('<U32')) -> dtype('<U32')
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "ufunctypeerror.py", line 8, in <module>
pickle.dumps(e)
_pickle.PicklingError: Can't pickle <class 'numpy.core._exceptions.UFuncTypeError'>: it's not the same object as numpy.core._exceptions.UFuncTypeError
|
numpy.core._exceptions.UFuncTypeError
|
def _raw_fft(a, n, axis, is_real, is_forward, inv_norm):
axis = normalize_axis_index(axis, a.ndim)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified." % n)
fct = 1 / inv_norm
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)] * len(s)
index[axis] = slice(0, n)
a = a[tuple(index)]
else:
index = [slice(None)] * len(s)
index[axis] = slice(0, s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[tuple(index)] = a
a = z
if axis == a.ndim - 1:
r = pfi.execute(a, is_real, is_forward, fct)
else:
a = swapaxes(a, axis, -1)
r = pfi.execute(a, is_real, is_forward, fct)
r = swapaxes(r, axis, -1)
return r
|
def _raw_fft(a, n, axis, is_real, is_forward, fct):
axis = normalize_axis_index(axis, a.ndim)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified." % n)
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)] * len(s)
index[axis] = slice(0, n)
a = a[tuple(index)]
else:
index = [slice(None)] * len(s)
index[axis] = slice(0, s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[tuple(index)] = a
a = z
if axis == a.ndim - 1:
r = pfi.execute(a, is_real, is_forward, fct)
else:
a = swapaxes(a, axis, -1)
r = pfi.execute(a, is_real, is_forward, fct)
r = swapaxes(r, axis, -1)
return r
|
https://github.com/numpy/numpy/issues/14176
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<__array_function__ internals>", line 6, in ifft
File "/home/peter/.local/lib/python3.7/site-packages/numpy/fft/pocketfft.py", line 274, in ifft
fct = 1/n
ZeroDivisionError: division by zero
|
ZeroDivisionError
|
def fft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
if `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([-2.33486982e-16+1.14423775e-17j, 8.00000000e+00-1.25557246e-15j,
2.33486982e-16+2.33486982e-16j, 0.00000000e+00+1.22464680e-16j,
-1.14423775e-17+2.33486982e-16j, 0.00000000e+00+5.20784380e-16j,
1.14423775e-17+1.14423775e-17j, 0.00000000e+00+1.22464680e-16j])
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
"""
a = asarray(a)
if n is None:
n = a.shape[axis]
inv_norm = 1
if norm is not None and _unitary(norm):
inv_norm = sqrt(n)
output = _raw_fft(a, n, axis, False, True, inv_norm)
return output
|
def fft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
if `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([-2.33486982e-16+1.14423775e-17j, 8.00000000e+00-1.25557246e-15j,
2.33486982e-16+2.33486982e-16j, 0.00000000e+00+1.22464680e-16j,
-1.14423775e-17+2.33486982e-16j, 0.00000000e+00+5.20784380e-16j,
1.14423775e-17+1.14423775e-17j, 0.00000000e+00+1.22464680e-16j])
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
"""
a = asarray(a)
if n is None:
n = a.shape[axis]
fct = 1
if norm is not None and _unitary(norm):
fct = 1 / sqrt(n)
output = _raw_fft(a, n, axis, False, True, fct)
return output
|
https://github.com/numpy/numpy/issues/14176
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<__array_function__ internals>", line 6, in ifft
File "/home/peter/.local/lib/python3.7/site-packages/numpy/fft/pocketfft.py", line 274, in ifft
fct = 1/n
ZeroDivisionError: division by zero
|
ZeroDivisionError
|
def ifft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e.,
* ``a[0]`` should contain the zero frequency term,
* ``a[1:n//2]`` should contain the positive-frequency terms,
* ``a[n//2 + 1:]`` should contain the negative-frequency terms, in
increasing order starting from the most negative frequency.
For an even number of input points, ``A[n//2]`` represents the sum of
the values at the positive and negative Nyquist frequencies, as the two
are aliased together. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j]) # may vary
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
[<matplotlib.lines.Line2D object at ...>, <matplotlib.lines.Line2D object at ...>]
>>> plt.legend(('real', 'imaginary'))
<matplotlib.legend.Legend object at ...>
>>> plt.show()
"""
a = asarray(a)
if n is None:
n = a.shape[axis]
if norm is not None and _unitary(norm):
inv_norm = sqrt(max(n, 1))
else:
inv_norm = n
output = _raw_fft(a, n, axis, False, False, inv_norm)
return output
|
def ifft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e.,
* ``a[0]`` should contain the zero frequency term,
* ``a[1:n//2]`` should contain the positive-frequency terms,
* ``a[n//2 + 1:]`` should contain the negative-frequency terms, in
increasing order starting from the most negative frequency.
For an even number of input points, ``A[n//2]`` represents the sum of
the values at the positive and negative Nyquist frequencies, as the two
are aliased together. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j]) # may vary
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
[<matplotlib.lines.Line2D object at ...>, <matplotlib.lines.Line2D object at ...>]
>>> plt.legend(('real', 'imaginary'))
<matplotlib.legend.Legend object at ...>
>>> plt.show()
"""
a = asarray(a)
if n is None:
n = a.shape[axis]
if norm is not None and _unitary(norm):
fct = 1 / sqrt(max(n, 1))
else:
fct = 1 / max(n, 1)
output = _raw_fft(a, n, axis, False, False, fct)
return output
|
https://github.com/numpy/numpy/issues/14176
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<__array_function__ internals>", line 6, in ifft
File "/home/peter/.local/lib/python3.7/site-packages/numpy/fft/pocketfft.py", line 274, in ifft
fct = 1/n
ZeroDivisionError: division by zero
|
ZeroDivisionError
|
def rfft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermitian-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n//2 + 1``.
When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
If `n` is even, ``A[-1]`` contains the term representing both positive
and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
the largest positive frequency (fs/2*(n-1)/n), and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) # may vary
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j]) # may vary
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
a = asarray(a)
inv_norm = 1
if norm is not None and _unitary(norm):
if n is None:
n = a.shape[axis]
inv_norm = sqrt(n)
output = _raw_fft(a, n, axis, True, True, inv_norm)
return output
|
def rfft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermitian-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n//2 + 1``.
When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
If `n` is even, ``A[-1]`` contains the term representing both positive
and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
the largest positive frequency (fs/2*(n-1)/n), and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) # may vary
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j]) # may vary
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
a = asarray(a)
fct = 1
if norm is not None and _unitary(norm):
if n is None:
n = a.shape[axis]
fct = 1 / sqrt(n)
output = _raw_fft(a, n, axis, True, True, fct)
return output
|
https://github.com/numpy/numpy/issues/14176
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<__array_function__ internals>", line 6, in ifft
File "/home/peter/.local/lib/python3.7/site-packages/numpy/fft/pocketfft.py", line 274, in ifft
fct = 1/n
ZeroDivisionError: division by zero
|
ZeroDivisionError
|
def irfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is taken to be
``2*(m-1)`` where ``m`` is the length of the input along the axis
specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
The correct interpretation of the hermitian input depends on the length of
the original data, as given by `n`. This is because each input shape could
correspond to either an odd or even length signal. By default, `irfft`
assumes an even output length which puts the last entry at the Nyquist
frequency; aliasing with its symmetric counterpart. By Hermitian symmetry,
the value is thus treated as purely real. To avoid losing information, the
correct length of the real input **must** be given.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) # may vary
>>> np.fft.irfft([1, -1j, -1])
array([0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
a = asarray(a)
if n is None:
n = (a.shape[axis] - 1) * 2
inv_norm = n
if norm is not None and _unitary(norm):
inv_norm = sqrt(n)
output = _raw_fft(a, n, axis, True, False, inv_norm)
return output
|
def irfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is taken to be
``2*(m-1)`` where ``m`` is the length of the input along the axis
specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
The correct interpretation of the hermitian input depends on the length of
the original data, as given by `n`. This is because each input shape could
correspond to either an odd or even length signal. By default, `irfft`
assumes an even output length which puts the last entry at the Nyquist
frequency; aliasing with its symmetric counterpart. By Hermitian symmetry,
the value is thus treated as purely real. To avoid losing information, the
correct length of the real input **must** be given.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) # may vary
>>> np.fft.irfft([1, -1j, -1])
array([0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
a = asarray(a)
if n is None:
n = (a.shape[axis] - 1) * 2
fct = 1 / n
if norm is not None and _unitary(norm):
fct = 1 / sqrt(n)
output = _raw_fft(a, n, axis, True, False, fct)
return output
|
https://github.com/numpy/numpy/issues/14176
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<__array_function__ internals>", line 6, in ifft
File "/home/peter/.local/lib/python3.7/site-packages/numpy/fft/pocketfft.py", line 274, in ifft
fct = 1/n
ZeroDivisionError: division by zero
|
ZeroDivisionError
|
def __init__(
self, pyfunc, otypes=None, doc=None, excluded=None, cache=False, signature=None
):
self.pyfunc = pyfunc
self.cache = cache
self.signature = signature
self._ufunc = {} # Caching to improve default performance
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
for char in otypes:
if char not in typecodes["All"]:
raise ValueError("Invalid otype specified: %s" % (char,))
elif iterable(otypes):
otypes = "".join([_nx.dtype(x).char for x in otypes])
elif otypes is not None:
raise ValueError("Invalid otype specification")
self.otypes = otypes
# Excluded variable support
if excluded is None:
excluded = set()
self.excluded = set(excluded)
if signature is not None:
self._in_and_out_core_dims = _parse_gufunc_signature(signature)
else:
self._in_and_out_core_dims = None
|
def __init__(
self, pyfunc, otypes=None, doc=None, excluded=None, cache=False, signature=None
):
self.pyfunc = pyfunc
self.cache = cache
self.signature = signature
self._ufunc = None # Caching to improve default performance
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
for char in otypes:
if char not in typecodes["All"]:
raise ValueError("Invalid otype specified: %s" % (char,))
elif iterable(otypes):
otypes = "".join([_nx.dtype(x).char for x in otypes])
elif otypes is not None:
raise ValueError("Invalid otype specification")
self.otypes = otypes
# Excluded variable support
if excluded is None:
excluded = set()
self.excluded = set(excluded)
if signature is not None:
self._in_and_out_core_dims = _parse_gufunc_signature(signature)
else:
self._in_and_out_core_dims = None
|
https://github.com/numpy/numpy/issues/16120
|
Traceback (most recent call last):
File "function.py", line 17, in <module>
g(np.arange(3.0)) # Crash!
File "/home/hylke/.local/lib/python3.8/site-packages/numpy/lib/function_base.py", line 2091, in __call__
return self._vectorize_call(func=func, args=vargs)
File "/home/hylke/.local/lib/python3.8/site-packages/numpy/lib/function_base.py", line 2167, in _vectorize_call
outputs = ufunc(*inputs)
ValueError: invalid number of arguments
|
ValueError
|
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
if not args:
raise ValueError("args can not be empty")
if self.otypes is not None:
otypes = self.otypes
# self._ufunc is a dictionary whose keys are the number of
# arguments (i.e. len(args)) and whose values are ufuncs created
# by frompyfunc. len(args) can be different for different calls if
# self.pyfunc has parameters with default values. We only use the
# cache when func is self.pyfunc, which occurs when the call uses
# only positional arguments and no arguments are excluded.
nin = len(args)
nout = len(self.otypes)
if func is not self.pyfunc or nin not in self._ufunc:
ufunc = frompyfunc(func, nin, nout)
else:
ufunc = None # We'll get it from self._ufunc
if func is self.pyfunc:
ufunc = self._ufunc.setdefault(nin, ufunc)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
# the subsequent call when the ufunc is evaluated.
# Assumes that ufunc first evaluates the 0th elements in the input
# arrays (the input values are not checked to ensure this)
args = [asarray(arg) for arg in args]
if builtins.any(arg.size == 0 for arg in args):
raise ValueError(
"cannot call `vectorize` on size 0 inputs unless `otypes` is set"
)
inputs = [arg.flat[0] for arg in args]
outputs = func(*inputs)
# Performance note: profiling indicates that -- for simple
# functions at least -- this wrapping can almost double the
# execution time.
# Hence we make it optional.
if self.cache:
_cache = [outputs]
def _func(*vargs):
if _cache:
return _cache.pop()
else:
return func(*vargs)
else:
_func = func
if isinstance(outputs, tuple):
nout = len(outputs)
else:
nout = 1
outputs = (outputs,)
otypes = "".join([asarray(outputs[_k]).dtype.char for _k in range(nout)])
# Performance note: profiling indicates that creating the ufunc is
# not a significant cost compared with wrapping so it seems not
# worth trying to cache this.
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
|
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
if not args:
raise ValueError("args can not be empty")
if self.otypes is not None:
otypes = self.otypes
nout = len(otypes)
# Note logic here: We only *use* self._ufunc if func is self.pyfunc
# even though we set self._ufunc regardless.
if func is self.pyfunc and self._ufunc is not None:
ufunc = self._ufunc
else:
ufunc = self._ufunc = frompyfunc(func, len(args), nout)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
# the subsequent call when the ufunc is evaluated.
# Assumes that ufunc first evaluates the 0th elements in the input
# arrays (the input values are not checked to ensure this)
args = [asarray(arg) for arg in args]
if builtins.any(arg.size == 0 for arg in args):
raise ValueError(
"cannot call `vectorize` on size 0 inputs unless `otypes` is set"
)
inputs = [arg.flat[0] for arg in args]
outputs = func(*inputs)
# Performance note: profiling indicates that -- for simple
# functions at least -- this wrapping can almost double the
# execution time.
# Hence we make it optional.
if self.cache:
_cache = [outputs]
def _func(*vargs):
if _cache:
return _cache.pop()
else:
return func(*vargs)
else:
_func = func
if isinstance(outputs, tuple):
nout = len(outputs)
else:
nout = 1
outputs = (outputs,)
otypes = "".join([asarray(outputs[_k]).dtype.char for _k in range(nout)])
# Performance note: profiling indicates that creating the ufunc is
# not a significant cost compared with wrapping so it seems not
# worth trying to cache this.
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
|
https://github.com/numpy/numpy/issues/16120
|
Traceback (most recent call last):
File "function.py", line 17, in <module>
g(np.arange(3.0)) # Crash!
File "/home/hylke/.local/lib/python3.8/site-packages/numpy/lib/function_base.py", line 2091, in __call__
return self._vectorize_call(func=func, args=vargs)
File "/home/hylke/.local/lib/python3.8/site-packages/numpy/lib/function_base.py", line 2167, in _vectorize_call
outputs = ufunc(*inputs)
ValueError: invalid number of arguments
|
ValueError
|
def __getitem__(self, indx):
"""
x.__getitem__(y) <==> x[y]
Return the item described by i, as a masked array.
"""
# We could directly use ndarray.__getitem__ on self.
# But then we would have to modify __array_finalize__ to prevent the
# mask of being reshaped if it hasn't been set up properly yet
# So it's easier to stick to the current version
dout = self.data[indx]
_mask = self._mask
def _is_scalar(m):
return not isinstance(m, np.ndarray)
def _scalar_heuristic(arr, elem):
"""
Return whether `elem` is a scalar result of indexing `arr`, or None
if undecidable without promoting nomask to a full mask
"""
# obviously a scalar
if not isinstance(elem, np.ndarray):
return True
# object array scalar indexing can return anything
elif arr.dtype.type is np.object_:
if arr.dtype is not elem.dtype:
# elem is an array, but dtypes do not match, so must be
# an element
return True
# well-behaved subclass that only returns 0d arrays when
# expected - this is not a scalar
elif type(arr).__getitem__ == ndarray.__getitem__:
return False
return None
if _mask is not nomask:
# _mask cannot be a subclass, so it tells us whether we should
# expect a scalar. It also cannot be of dtype object.
mout = _mask[indx]
scalar_expected = _is_scalar(mout)
else:
# attempt to apply the heuristic to avoid constructing a full mask
mout = nomask
scalar_expected = _scalar_heuristic(self.data, dout)
if scalar_expected is None:
# heuristics have failed
# construct a full array, so we can be certain. This is costly.
# we could also fall back on ndarray.__getitem__(self.data, indx)
scalar_expected = _is_scalar(getmaskarray(self)[indx])
# Did we extract a single item?
if scalar_expected:
# A record
if isinstance(dout, np.void):
# We should always re-cast to mvoid, otherwise users can
# change masks on rows that already have masked values, but not
# on rows that have no masked values, which is inconsistent.
return mvoid(dout, mask=mout, hardmask=self._hardmask)
# special case introduced in gh-5962
elif (
self.dtype.type is np.object_
and isinstance(dout, np.ndarray)
and dout is not masked
):
# If masked, turn into a MaskedArray, with everything masked.
if mout:
return MaskedArray(dout, mask=True)
else:
return dout
# Just a scalar
else:
if mout:
return masked
else:
return dout
else:
# Force dout to MA
dout = dout.view(type(self))
# Inherit attributes from self
dout._update_from(self)
# Check the fill_value
if is_string_or_list_of_strings(indx):
if self._fill_value is not None:
dout._fill_value = self._fill_value[indx]
# Something like gh-15895 has happened if this check fails.
# _fill_value should always be an ndarray.
if not isinstance(dout._fill_value, np.ndarray):
raise RuntimeError("Internal NumPy error.")
# If we're indexing a multidimensional field in a
# structured array (such as dtype("(2,)i2,(2,)i1")),
# dimensionality goes up (M[field].ndim == M.ndim +
# M.dtype[field].ndim). That's fine for
# M[field] but problematic for M[field].fill_value
# which should have shape () to avoid breaking several
# methods. There is no great way out, so set to
# first element. See issue #6723.
if dout._fill_value.ndim > 0:
if not (dout._fill_value == dout._fill_value.flat[0]).all():
warnings.warn(
"Upon accessing multidimensional field "
f"{indx!s}, need to keep dimensionality "
"of fill_value at 0. Discarding "
"heterogeneous fill_value and setting "
f"all to {dout._fill_value[0]!s}.",
stacklevel=2,
)
# Need to use `.flat[0:1].squeeze(...)` instead of just
# `.flat[0]` to ensure the result is a 0d array and not
# a scalar.
dout._fill_value = dout._fill_value.flat[0:1].squeeze(axis=0)
dout._isfield = True
# Update the mask if needed
if mout is not nomask:
# set shape to match that of data; this is needed for matrices
dout._mask = reshape(mout, dout.shape)
dout._sharedmask = True
# Note: Don't try to check for m.any(), that'll take too long
return dout
|
def __getitem__(self, indx):
"""
x.__getitem__(y) <==> x[y]
Return the item described by i, as a masked array.
"""
# We could directly use ndarray.__getitem__ on self.
# But then we would have to modify __array_finalize__ to prevent the
# mask of being reshaped if it hasn't been set up properly yet
# So it's easier to stick to the current version
dout = self.data[indx]
_mask = self._mask
def _is_scalar(m):
return not isinstance(m, np.ndarray)
def _scalar_heuristic(arr, elem):
"""
Return whether `elem` is a scalar result of indexing `arr`, or None
if undecidable without promoting nomask to a full mask
"""
# obviously a scalar
if not isinstance(elem, np.ndarray):
return True
# object array scalar indexing can return anything
elif arr.dtype.type is np.object_:
if arr.dtype is not elem.dtype:
# elem is an array, but dtypes do not match, so must be
# an element
return True
# well-behaved subclass that only returns 0d arrays when
# expected - this is not a scalar
elif type(arr).__getitem__ == ndarray.__getitem__:
return False
return None
if _mask is not nomask:
# _mask cannot be a subclass, so it tells us whether we should
# expect a scalar. It also cannot be of dtype object.
mout = _mask[indx]
scalar_expected = _is_scalar(mout)
else:
# attempt to apply the heuristic to avoid constructing a full mask
mout = nomask
scalar_expected = _scalar_heuristic(self.data, dout)
if scalar_expected is None:
# heuristics have failed
# construct a full array, so we can be certain. This is costly.
# we could also fall back on ndarray.__getitem__(self.data, indx)
scalar_expected = _is_scalar(getmaskarray(self)[indx])
# Did we extract a single item?
if scalar_expected:
# A record
if isinstance(dout, np.void):
# We should always re-cast to mvoid, otherwise users can
# change masks on rows that already have masked values, but not
# on rows that have no masked values, which is inconsistent.
return mvoid(dout, mask=mout, hardmask=self._hardmask)
# special case introduced in gh-5962
elif (
self.dtype.type is np.object_
and isinstance(dout, np.ndarray)
and dout is not masked
):
# If masked, turn into a MaskedArray, with everything masked.
if mout:
return MaskedArray(dout, mask=True)
else:
return dout
# Just a scalar
else:
if mout:
return masked
else:
return dout
else:
# Force dout to MA
dout = dout.view(type(self))
# Inherit attributes from self
dout._update_from(self)
# Check the fill_value
if is_string_or_list_of_strings(indx):
if self._fill_value is not None:
dout._fill_value = self._fill_value[indx]
# If we're indexing a multidimensional field in a
# structured array (such as dtype("(2,)i2,(2,)i1")),
# dimensionality goes up (M[field].ndim == M.ndim +
# M.dtype[field].ndim). That's fine for
# M[field] but problematic for M[field].fill_value
# which should have shape () to avoid breaking several
# methods. There is no great way out, so set to
# first element. See issue #6723.
if dout._fill_value.ndim > 0:
if not (dout._fill_value == dout._fill_value.flat[0]).all():
warnings.warn(
"Upon accessing multidimensional field "
f"{indx!s}, need to keep dimensionality "
"of fill_value at 0. Discarding "
"heterogeneous fill_value and setting "
f"all to {dout._fill_value[0]!s}.",
stacklevel=2,
)
dout._fill_value = dout._fill_value.flat[0]
dout._isfield = True
# Update the mask if needed
if mout is not nomask:
# set shape to match that of data; this is needed for matrices
dout._mask = reshape(mout, dout.shape)
dout._sharedmask = True
# Note: Don't try to check for m.any(), that'll take too long
return dout
|
https://github.com/numpy/numpy/issues/15895
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Python37\lib\site-packages\numpy\ma\core.py", line 3276, in __getitem__
if dout._fill_value.ndim > 0:
AttributeError: 'str' object has no attribute 'ndim'
|
AttributeError
|
def _replace_nan(a, val):
"""
If `a` is of inexact type, make a copy of `a`, replace NaNs with
the `val` value, and return the copy together with a boolean mask
marking the locations where NaNs were present. If `a` is not of
inexact type, do nothing and return `a` together with a mask of None.
Note that scalars will end up as array scalars, which is important
for using the result as the value of the out argument in some
operations.
Parameters
----------
a : array-like
Input array.
val : float
NaN values are set to val before doing the operation.
Returns
-------
y : ndarray
If `a` is of inexact type, return a copy of `a` with the NaNs
replaced by the fill value, otherwise return `a`.
mask: {bool, None}
If `a` is of inexact type, return a boolean mask marking locations of
NaNs, otherwise return None.
"""
a = np.array(a, subok=True, copy=True)
if a.dtype == np.object_:
# object arrays do not support `isnan` (gh-9009), so make a guess
mask = a != a
elif issubclass(a.dtype.type, np.inexact):
mask = np.isnan(a)
else:
mask = None
if mask is not None:
np.copyto(a, val, where=mask)
return a, mask
|
def _replace_nan(a, val):
"""
If `a` is of inexact type, make a copy of `a`, replace NaNs with
the `val` value, and return the copy together with a boolean mask
marking the locations where NaNs were present. If `a` is not of
inexact type, do nothing and return `a` together with a mask of None.
Note that scalars will end up as array scalars, which is important
for using the result as the value of the out argument in some
operations.
Parameters
----------
a : array-like
Input array.
val : float
NaN values are set to val before doing the operation.
Returns
-------
y : ndarray
If `a` is of inexact type, return a copy of `a` with the NaNs
replaced by the fill value, otherwise return `a`.
mask: {bool, None}
If `a` is of inexact type, return a boolean mask marking locations of
NaNs, otherwise return None.
"""
a = np.array(a, subok=True, copy=True)
if not issubclass(a.dtype.type, np.inexact):
return a, None
mask = np.isnan(a)
np.copyto(a, val, where=mask)
return a, mask
|
https://github.com/numpy/numpy/issues/9008
|
In [2]: import numpy as np
In [3]: np.__version__
Out[3]: '1.13.0.dev0+bca7922'
In [4]: a = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=object)
In [5]: np.nanmax(a, axis=0)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-a020f98a2536> in <module>()
----> 1 np.nanmax(a, axis=0)
/Users/warren/miniconda3numpy/lib/python3.5/site-packages/numpy-1.13.0.dev0+bca7922-py3.5-macosx-10.6-x86_64.egg/numpy/lib/nanfunctions.py in nanmax(a, axis, out, keepdims)
343 # Fast, but not safe for subclasses of ndarray
344 res = np.fmax.reduce(a, axis=axis, out=out, **kwargs)
--> 345 if np.isnan(res).any():
346 warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=2)
347 else:
TypeError: ufunc 'isnan' not supported for the input types, and the inputs could not be safely coerced to any supported types according to the casting rule ''safe''
|
TypeError
|
def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
"""
Return minimum of an array or minimum along an axis, ignoring any NaNs.
When all-NaN slices are encountered a ``RuntimeWarning`` is raised and
Nan is returned for that slice.
Parameters
----------
a : array_like
Array containing numbers whose minimum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the minimum is computed. The default is to compute
the minimum of the flattened array.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
`doc.ufuncs` for details.
.. versionadded:: 1.8.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
If the value is anything but the default, then
`keepdims` will be passed through to the `min` method
of sub-classes of `ndarray`. If the sub-classes methods
does not implement `keepdims` any exceptions will be raised.
.. versionadded:: 1.8.0
Returns
-------
nanmin : ndarray
An array with the same shape as `a`, with the specified axis
removed. If `a` is a 0-d array, or if axis is None, an ndarray
scalar is returned. The same dtype as `a` is returned.
See Also
--------
nanmax :
The maximum value of an array along a given axis, ignoring any NaNs.
amin :
The minimum value of an array along a given axis, propagating any NaNs.
fmin :
Element-wise minimum of two arrays, ignoring any NaNs.
minimum :
Element-wise minimum of two arrays, propagating any NaNs.
isnan :
Shows which elements are Not a Number (NaN).
isfinite:
Shows which elements are neither NaN nor infinity.
amax, fmax, maximum
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative
infinity is treated as a very small (i.e. negative) number.
If the input has a integer type the function is equivalent to np.min.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmin(a)
1.0
>>> np.nanmin(a, axis=0)
array([ 1., 2.])
>>> np.nanmin(a, axis=1)
array([ 1., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmin([1, 2, np.nan, np.inf])
1.0
>>> np.nanmin([1, 2, np.nan, np.NINF])
-inf
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs["keepdims"] = keepdims
if type(a) is np.ndarray and a.dtype != np.object_:
# Fast, but not safe for subclasses of ndarray, or object arrays,
# which do not implement isnan (gh-9009), or fmin correctly (gh-8975)
res = np.fmin.reduce(a, axis=axis, out=out, **kwargs)
if np.isnan(res).any():
warnings.warn("All-NaN axis encountered", RuntimeWarning, stacklevel=2)
else:
# Slow, but safe for subclasses of ndarray
a, mask = _replace_nan(a, +np.inf)
res = np.amin(a, axis=axis, out=out, **kwargs)
if mask is None:
return res
# Check for all-NaN axis
mask = np.all(mask, axis=axis, **kwargs)
if np.any(mask):
res = _copyto(res, np.nan, mask)
warnings.warn("All-NaN axis encountered", RuntimeWarning, stacklevel=2)
return res
|
def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
"""
Return minimum of an array or minimum along an axis, ignoring any NaNs.
When all-NaN slices are encountered a ``RuntimeWarning`` is raised and
Nan is returned for that slice.
Parameters
----------
a : array_like
Array containing numbers whose minimum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the minimum is computed. The default is to compute
the minimum of the flattened array.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
`doc.ufuncs` for details.
.. versionadded:: 1.8.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
If the value is anything but the default, then
`keepdims` will be passed through to the `min` method
of sub-classes of `ndarray`. If the sub-classes methods
does not implement `keepdims` any exceptions will be raised.
.. versionadded:: 1.8.0
Returns
-------
nanmin : ndarray
An array with the same shape as `a`, with the specified axis
removed. If `a` is a 0-d array, or if axis is None, an ndarray
scalar is returned. The same dtype as `a` is returned.
See Also
--------
nanmax :
The maximum value of an array along a given axis, ignoring any NaNs.
amin :
The minimum value of an array along a given axis, propagating any NaNs.
fmin :
Element-wise minimum of two arrays, ignoring any NaNs.
minimum :
Element-wise minimum of two arrays, propagating any NaNs.
isnan :
Shows which elements are Not a Number (NaN).
isfinite:
Shows which elements are neither NaN nor infinity.
amax, fmax, maximum
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative
infinity is treated as a very small (i.e. negative) number.
If the input has a integer type the function is equivalent to np.min.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmin(a)
1.0
>>> np.nanmin(a, axis=0)
array([ 1., 2.])
>>> np.nanmin(a, axis=1)
array([ 1., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmin([1, 2, np.nan, np.inf])
1.0
>>> np.nanmin([1, 2, np.nan, np.NINF])
-inf
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs["keepdims"] = keepdims
if not isinstance(a, np.ndarray) or type(a) is np.ndarray:
# Fast, but not safe for subclasses of ndarray
res = np.fmin.reduce(a, axis=axis, out=out, **kwargs)
if np.isnan(res).any():
warnings.warn("All-NaN axis encountered", RuntimeWarning, stacklevel=2)
else:
# Slow, but safe for subclasses of ndarray
a, mask = _replace_nan(a, +np.inf)
res = np.amin(a, axis=axis, out=out, **kwargs)
if mask is None:
return res
# Check for all-NaN axis
mask = np.all(mask, axis=axis, **kwargs)
if np.any(mask):
res = _copyto(res, np.nan, mask)
warnings.warn("All-NaN axis encountered", RuntimeWarning, stacklevel=2)
return res
|
https://github.com/numpy/numpy/issues/9008
|
In [2]: import numpy as np
In [3]: np.__version__
Out[3]: '1.13.0.dev0+bca7922'
In [4]: a = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=object)
In [5]: np.nanmax(a, axis=0)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-a020f98a2536> in <module>()
----> 1 np.nanmax(a, axis=0)
/Users/warren/miniconda3numpy/lib/python3.5/site-packages/numpy-1.13.0.dev0+bca7922-py3.5-macosx-10.6-x86_64.egg/numpy/lib/nanfunctions.py in nanmax(a, axis, out, keepdims)
343 # Fast, but not safe for subclasses of ndarray
344 res = np.fmax.reduce(a, axis=axis, out=out, **kwargs)
--> 345 if np.isnan(res).any():
346 warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=2)
347 else:
TypeError: ufunc 'isnan' not supported for the input types, and the inputs could not be safely coerced to any supported types according to the casting rule ''safe''
|
TypeError
|
def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
"""
Return the maximum of an array or maximum along an axis, ignoring any
NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is
raised and NaN is returned for that slice.
Parameters
----------
a : array_like
Array containing numbers whose maximum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the maximum is computed. The default is to compute
the maximum of the flattened array.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
`doc.ufuncs` for details.
.. versionadded:: 1.8.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
If the value is anything but the default, then
`keepdims` will be passed through to the `max` method
of sub-classes of `ndarray`. If the sub-classes methods
does not implement `keepdims` any exceptions will be raised.
.. versionadded:: 1.8.0
Returns
-------
nanmax : ndarray
An array with the same shape as `a`, with the specified axis removed.
If `a` is a 0-d array, or if axis is None, an ndarray scalar is
returned. The same dtype as `a` is returned.
See Also
--------
nanmin :
The minimum value of an array along a given axis, ignoring any NaNs.
amax :
The maximum value of an array along a given axis, propagating any NaNs.
fmax :
Element-wise maximum of two arrays, ignoring any NaNs.
maximum :
Element-wise maximum of two arrays, propagating any NaNs.
isnan :
Shows which elements are Not a Number (NaN).
isfinite:
Shows which elements are neither NaN nor infinity.
amin, fmin, minimum
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative
infinity is treated as a very small (i.e. negative) number.
If the input has a integer type the function is equivalent to np.max.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmax(a)
3.0
>>> np.nanmax(a, axis=0)
array([ 3., 2.])
>>> np.nanmax(a, axis=1)
array([ 2., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmax([1, 2, np.nan, np.NINF])
2.0
>>> np.nanmax([1, 2, np.nan, np.inf])
inf
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs["keepdims"] = keepdims
if type(a) is np.ndarray and a.dtype != np.object_:
# Fast, but not safe for subclasses of ndarray, or object arrays,
# which do not implement isnan (gh-9009), or fmax correctly (gh-8975)
res = np.fmax.reduce(a, axis=axis, out=out, **kwargs)
if np.isnan(res).any():
warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=2)
else:
# Slow, but safe for subclasses of ndarray
a, mask = _replace_nan(a, -np.inf)
res = np.amax(a, axis=axis, out=out, **kwargs)
if mask is None:
return res
# Check for all-NaN axis
mask = np.all(mask, axis=axis, **kwargs)
if np.any(mask):
res = _copyto(res, np.nan, mask)
warnings.warn("All-NaN axis encountered", RuntimeWarning, stacklevel=2)
return res
|
def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
"""
Return the maximum of an array or maximum along an axis, ignoring any
NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is
raised and NaN is returned for that slice.
Parameters
----------
a : array_like
Array containing numbers whose maximum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the maximum is computed. The default is to compute
the maximum of the flattened array.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
`doc.ufuncs` for details.
.. versionadded:: 1.8.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
If the value is anything but the default, then
`keepdims` will be passed through to the `max` method
of sub-classes of `ndarray`. If the sub-classes methods
does not implement `keepdims` any exceptions will be raised.
.. versionadded:: 1.8.0
Returns
-------
nanmax : ndarray
An array with the same shape as `a`, with the specified axis removed.
If `a` is a 0-d array, or if axis is None, an ndarray scalar is
returned. The same dtype as `a` is returned.
See Also
--------
nanmin :
The minimum value of an array along a given axis, ignoring any NaNs.
amax :
The maximum value of an array along a given axis, propagating any NaNs.
fmax :
Element-wise maximum of two arrays, ignoring any NaNs.
maximum :
Element-wise maximum of two arrays, propagating any NaNs.
isnan :
Shows which elements are Not a Number (NaN).
isfinite:
Shows which elements are neither NaN nor infinity.
amin, fmin, minimum
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative
infinity is treated as a very small (i.e. negative) number.
If the input has a integer type the function is equivalent to np.max.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmax(a)
3.0
>>> np.nanmax(a, axis=0)
array([ 3., 2.])
>>> np.nanmax(a, axis=1)
array([ 2., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmax([1, 2, np.nan, np.NINF])
2.0
>>> np.nanmax([1, 2, np.nan, np.inf])
inf
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs["keepdims"] = keepdims
if not isinstance(a, np.ndarray) or type(a) is np.ndarray:
# Fast, but not safe for subclasses of ndarray
res = np.fmax.reduce(a, axis=axis, out=out, **kwargs)
if np.isnan(res).any():
warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=2)
else:
# Slow, but safe for subclasses of ndarray
a, mask = _replace_nan(a, -np.inf)
res = np.amax(a, axis=axis, out=out, **kwargs)
if mask is None:
return res
# Check for all-NaN axis
mask = np.all(mask, axis=axis, **kwargs)
if np.any(mask):
res = _copyto(res, np.nan, mask)
warnings.warn("All-NaN axis encountered", RuntimeWarning, stacklevel=2)
return res
|
https://github.com/numpy/numpy/issues/9008
|
In [2]: import numpy as np
In [3]: np.__version__
Out[3]: '1.13.0.dev0+bca7922'
In [4]: a = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=object)
In [5]: np.nanmax(a, axis=0)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-a020f98a2536> in <module>()
----> 1 np.nanmax(a, axis=0)
/Users/warren/miniconda3numpy/lib/python3.5/site-packages/numpy-1.13.0.dev0+bca7922-py3.5-macosx-10.6-x86_64.egg/numpy/lib/nanfunctions.py in nanmax(a, axis, out, keepdims)
343 # Fast, but not safe for subclasses of ndarray
344 res = np.fmax.reduce(a, axis=axis, out=out, **kwargs)
--> 345 if np.isnan(res).any():
346 warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=2)
347 else:
TypeError: ufunc 'isnan' not supported for the input types, and the inputs could not be safely coerced to any supported types according to the casting rule ''safe''
|
TypeError
|
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D, unless `ord`
is None. If both `axis` and `ord` are None, the 2-norm of
``x.ravel`` will be returned.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object. The default is None.
axis : {None, int, 2-tuple of ints}, optional.
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned. The default
is None.
.. versionadded:: 1.8.0
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
.. versionadded:: 1.10.0
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
See Also
--------
scipy.linalg.norm : Similar function in SciPy.
Notes
-----
For values of ``ord < 1``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
Both the Frobenius and nuclear norm orders are only defined for
matrices and raise a ValueError when ``x.ndim != 2``.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, ..., 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4.0
>>> LA.norm(b, np.inf)
9.0
>>> LA.norm(a, -np.inf)
0.0
>>> LA.norm(b, -np.inf)
2.0
>>> LA.norm(a, 1)
20.0
>>> LA.norm(b, 1)
7.0
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6.0
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
0.0
>>> LA.norm(b, -2)
1.8570331885190563e-016 # may vary
>>> LA.norm(a, 3)
5.8480354764257312 # may vary
>>> LA.norm(a, -3)
0.0
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([ 6., 6.])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
if not issubclass(x.dtype.type, (inexact, object_)):
x = x.astype(float)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if (
(ord is None)
or (ord in ("f", "fro") and ndim == 2)
or (ord == 2 and ndim == 1)
):
x = x.ravel(order="K")
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim * [1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
except Exception:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).astype(x.real.dtype).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
# None of the str-type keywords for ord ('fro', 'nuc')
# are valid for vectors
elif isinstance(ord, str):
raise ValueError(f"Invalid norm order '{ord}' for vectors")
else:
absx = abs(x)
absx **= ord
ret = add.reduce(absx, axis=axis, keepdims=keepdims)
ret **= 1 / ord
return ret
elif len(axis) == 2:
row_axis, col_axis = axis
row_axis = normalize_axis_index(row_axis, nd)
col_axis = normalize_axis_index(col_axis, nd)
if row_axis == col_axis:
raise ValueError("Duplicate axes given.")
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, "fro", "f"]:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
elif ord == "nuc":
ret = _multi_svd_norm(x, row_axis, col_axis, sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
|
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D, unless `ord`
is None. If both `axis` and `ord` are None, the 2-norm of
``x.ravel`` will be returned.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object. The default is None.
axis : {None, int, 2-tuple of ints}, optional.
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned. The default
is None.
.. versionadded:: 1.8.0
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
.. versionadded:: 1.10.0
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
See Also
--------
scipy.linalg.norm : Similar function in SciPy.
Notes
-----
For values of ``ord < 1``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, ..., 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4.0
>>> LA.norm(b, np.inf)
9.0
>>> LA.norm(a, -np.inf)
0.0
>>> LA.norm(b, -np.inf)
2.0
>>> LA.norm(a, 1)
20.0
>>> LA.norm(b, 1)
7.0
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6.0
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
0.0
>>> LA.norm(b, -2)
1.8570331885190563e-016 # may vary
>>> LA.norm(a, 3)
5.8480354764257312 # may vary
>>> LA.norm(a, -3)
0.0
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([ 6., 6.])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
if not issubclass(x.dtype.type, (inexact, object_)):
x = x.astype(float)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if (
(ord is None)
or (ord in ("f", "fro") and ndim == 2)
or (ord == 2 and ndim == 1)
):
x = x.ravel(order="K")
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim * [1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
except Exception:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).astype(x.real.dtype).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
absx = abs(x)
absx **= ord
ret = add.reduce(absx, axis=axis, keepdims=keepdims)
ret **= 1 / ord
return ret
elif len(axis) == 2:
row_axis, col_axis = axis
row_axis = normalize_axis_index(row_axis, nd)
col_axis = normalize_axis_index(col_axis, nd)
if row_axis == col_axis:
raise ValueError("Duplicate axes given.")
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, "fro", "f"]:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
elif ord == "nuc":
ret = _multi_svd_norm(x, row_axis, col_axis, sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
|
https://github.com/numpy/numpy/issues/15533
|
1.7594677278427366
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
//anaconda3/lib/python3.7/site-packages/numpy/linalg/linalg.py in norm(x, ord, axis, keepdims)
2515 try:
-> 2516 ord + 1
2517 except TypeError:
TypeError: can only concatenate str (not "int") to str
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
<ipython-input-18-2ace847024a5> in <module>
3 b = np.squeeze(a)
4 print(np.linalg.norm(a, 'fro'))
----> 5 print(np.linalg.norm(b, 'fro'))
<__array_function__ internals> in norm(*args, **kwargs)
//anaconda3/lib/python3.7/site-packages/numpy/linalg/linalg.py in norm(x, ord, axis, keepdims)
2516 ord + 1
2517 except TypeError:
-> 2518 raise ValueError("Invalid norm order for vectors.")
2519 absx = abs(x)
2520 absx **= ord
ValueError: Invalid norm order for vectors.
|
TypeError
|
def binary_repr(num, width=None):
"""
Return the binary representation of the input number as a string.
For negative numbers, if width is not given, a minus sign is added to the
front. If width is given, the two's complement of the number is
returned, with respect to that width.
In a two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
representing signed integers on computers [1]_. A N-bit two's-complement
system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
num : int
Only an integer decimal number can be used.
width : int, optional
The length of the returned string if `num` is positive, or the length
of the two's complement if `num` is negative, provided that `width` is
at least a sufficient number of bits for `num` to be represented in the
designated form.
If the `width` value is insufficient, it will be ignored, and `num` will
be returned in binary (`num` > 0) or two's complement (`num` < 0) form
with its width equal to the minimum number of bits needed to represent
the number in the designated form. This behavior is deprecated and will
later raise an error.
.. deprecated:: 1.12.0
Returns
-------
bin : str
Binary representation of `num` or two's complement of `num`.
See Also
--------
base_repr: Return a string representation of a number in the given base
system.
bin: Python's built-in binary representation generator of an integer.
Notes
-----
`binary_repr` is equivalent to using `base_repr` with base 2, but about 25x
faster.
References
----------
.. [1] Wikipedia, "Two's complement",
https://en.wikipedia.org/wiki/Two's_complement
Examples
--------
>>> np.binary_repr(3)
'11'
>>> np.binary_repr(-3)
'-11'
>>> np.binary_repr(3, width=4)
'0011'
The two's complement is returned when the input number is negative and
width is specified:
>>> np.binary_repr(-3, width=3)
'101'
>>> np.binary_repr(-3, width=5)
'11101'
"""
def warn_if_insufficient(width, binwidth):
if width is not None and width < binwidth:
warnings.warn(
"Insufficient bit width provided. This behavior "
"will raise an error in the future.",
DeprecationWarning,
stacklevel=3,
)
# Ensure that num is a Python integer to avoid overflow or unwanted
# casts to floating point.
num = operator.index(num)
if num == 0:
return "0" * (width or 1)
elif num > 0:
binary = bin(num)[2:]
binwidth = len(binary)
outwidth = binwidth if width is None else max(binwidth, width)
warn_if_insufficient(width, binwidth)
return binary.zfill(outwidth)
else:
if width is None:
return "-" + bin(-num)[2:]
else:
poswidth = len(bin(-num)[2:])
# See gh-8679: remove extra digit
# for numbers at boundaries.
if 2 ** (poswidth - 1) == -num:
poswidth -= 1
twocomp = 2 ** (poswidth + 1) + num
binary = bin(twocomp)[2:]
binwidth = len(binary)
outwidth = max(binwidth, width)
warn_if_insufficient(width, binwidth)
return "1" * (outwidth - binwidth) + binary
|
def binary_repr(num, width=None):
"""
Return the binary representation of the input number as a string.
For negative numbers, if width is not given, a minus sign is added to the
front. If width is given, the two's complement of the number is
returned, with respect to that width.
In a two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
representing signed integers on computers [1]_. A N-bit two's-complement
system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
num : int
Only an integer decimal number can be used.
width : int, optional
The length of the returned string if `num` is positive, or the length
of the two's complement if `num` is negative, provided that `width` is
at least a sufficient number of bits for `num` to be represented in the
designated form.
If the `width` value is insufficient, it will be ignored, and `num` will
be returned in binary (`num` > 0) or two's complement (`num` < 0) form
with its width equal to the minimum number of bits needed to represent
the number in the designated form. This behavior is deprecated and will
later raise an error.
.. deprecated:: 1.12.0
Returns
-------
bin : str
Binary representation of `num` or two's complement of `num`.
See Also
--------
base_repr: Return a string representation of a number in the given base
system.
bin: Python's built-in binary representation generator of an integer.
Notes
-----
`binary_repr` is equivalent to using `base_repr` with base 2, but about 25x
faster.
References
----------
.. [1] Wikipedia, "Two's complement",
https://en.wikipedia.org/wiki/Two's_complement
Examples
--------
>>> np.binary_repr(3)
'11'
>>> np.binary_repr(-3)
'-11'
>>> np.binary_repr(3, width=4)
'0011'
The two's complement is returned when the input number is negative and
width is specified:
>>> np.binary_repr(-3, width=3)
'101'
>>> np.binary_repr(-3, width=5)
'11101'
"""
def warn_if_insufficient(width, binwidth):
if width is not None and width < binwidth:
warnings.warn(
"Insufficient bit width provided. This behavior "
"will raise an error in the future.",
DeprecationWarning,
stacklevel=3,
)
if num == 0:
return "0" * (width or 1)
elif num > 0:
binary = bin(num)[2:]
binwidth = len(binary)
outwidth = binwidth if width is None else max(binwidth, width)
warn_if_insufficient(width, binwidth)
return binary.zfill(outwidth)
else:
if width is None:
return "-" + bin(-num)[2:]
else:
poswidth = len(bin(-num)[2:])
# See gh-8679: remove extra digit
# for numbers at boundaries.
if 2 ** (poswidth - 1) == -num:
poswidth -= 1
twocomp = 2 ** (poswidth + 1) + num
binary = bin(twocomp)[2:]
binwidth = len(binary)
outwidth = max(binwidth, width)
warn_if_insufficient(width, binwidth)
return "1" * (outwidth - binwidth) + binary
|
https://github.com/numpy/numpy/issues/14289
|
System info:
3.6.8rc1 (v3.6.8rc1:cc3e73212a, Dec 11 2018, 17:37:34)
[GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)]
NumPy version: 1.17.0
val = -2305843009213693952
Python int: 1110000000000000000000000000000000000000000000000000000000000000
NumPy int64: 1110000000000000000000000000000000000000000000000000000000000000
val = -4611686018427387905
Python int: 1011111111111111111111111111111111111111111111111111111111111111
NumPy int64: 1011111111111111111111111111111111111111111111111111111111111111
val = -4611686018427387904
Python int: 1100000000000000000000000000000000000000000000000000000000000000
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-29-c41b585b0ed8> in <module>
8 print("val = {}".format(val))
9 print("Python int: {}".format(np.binary_repr(val, 64)))
---> 10 print("NumPy int64: {}\n".format(np.binary_repr(np.int64(val), 64)))
~/.local/share/virtualenvs/ai_tools-DEcp7MjW/lib/python3.6/site-packages/numpy/core/numeric.py in binary_repr(num, width)
1960
1961 twocomp = 2**(poswidth + 1) + num
-> 1962 binary = bin(twocomp)[2:]
1963 binwidth = len(binary)
1964
TypeError: 'numpy.float64' object cannot be interpreted as an integer
|
TypeError
|
def __new__(subtype, filename, dtype=uint8, mode="r+", offset=0, shape=None, order="C"):
# Import here to minimize 'import numpy' overhead
import mmap
import os.path
try:
mode = mode_equivalents[mode]
except KeyError:
if mode not in valid_filemodes:
raise ValueError(
"mode must be one of %s"
% (valid_filemodes + list(mode_equivalents.keys()))
)
if mode == "w+" and shape is None:
raise ValueError("shape must be given")
if hasattr(filename, "read"):
f_ctx = contextlib_nullcontext(filename)
else:
f_ctx = open(os_fspath(filename), ("r" if mode == "c" else mode) + "b")
with f_ctx as fid:
fid.seek(0, 2)
flen = fid.tell()
descr = dtypedescr(dtype)
_dbytes = descr.itemsize
if shape is None:
bytes = flen - offset
if bytes % _dbytes:
raise ValueError(
"Size of available data is not a multiple of the data-type size."
)
size = bytes // _dbytes
shape = (size,)
else:
if not isinstance(shape, tuple):
shape = (shape,)
size = np.intp(1) # avoid default choice of np.int_, which might overflow
for k in shape:
size *= k
bytes = long(offset + size * _dbytes)
if mode in ("w+", "r+") and flen < bytes:
fid.seek(bytes - 1, 0)
fid.write(b"\0")
fid.flush()
if mode == "c":
acc = mmap.ACCESS_COPY
elif mode == "r":
acc = mmap.ACCESS_READ
else:
acc = mmap.ACCESS_WRITE
start = offset - offset % mmap.ALLOCATIONGRANULARITY
bytes -= start
array_offset = offset - start
mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start)
self = ndarray.__new__(
subtype, shape, dtype=descr, buffer=mm, offset=array_offset, order=order
)
self._mmap = mm
self.offset = offset
self.mode = mode
if is_pathlib_path(filename):
# special case - if we were constructed with a pathlib.path,
# then filename is a path object, not a string
self.filename = filename.resolve()
elif hasattr(fid, "name") and isinstance(fid.name, basestring):
# py3 returns int for TemporaryFile().name
self.filename = os.path.abspath(fid.name)
# same as memmap copies (e.g. memmap + 1)
else:
self.filename = None
return self
|
def __new__(subtype, filename, dtype=uint8, mode="r+", offset=0, shape=None, order="C"):
# Import here to minimize 'import numpy' overhead
import mmap
import os.path
try:
mode = mode_equivalents[mode]
except KeyError:
if mode not in valid_filemodes:
raise ValueError(
"mode must be one of %s"
% (valid_filemodes + list(mode_equivalents.keys()))
)
if mode == "w+" and shape is None:
raise ValueError("shape must be given")
if hasattr(filename, "read"):
f_ctx = contextlib_nullcontext(filename)
else:
f_ctx = open(os_fspath(filename), ("r" if mode == "c" else mode) + "b")
with f_ctx as fid:
fid.seek(0, 2)
flen = fid.tell()
descr = dtypedescr(dtype)
_dbytes = descr.itemsize
if shape is None:
bytes = flen - offset
if bytes % _dbytes:
raise ValueError(
"Size of available data is not a multiple of the data-type size."
)
size = bytes // _dbytes
shape = (size,)
else:
if not isinstance(shape, tuple):
shape = (shape,)
size = np.intp(1) # avoid default choice of np.int_, which might overflow
for k in shape:
size *= k
bytes = long(offset + size * _dbytes)
if mode == "w+" or (mode == "r+" and flen < bytes):
fid.seek(bytes - 1, 0)
fid.write(b"\0")
fid.flush()
if mode == "c":
acc = mmap.ACCESS_COPY
elif mode == "r":
acc = mmap.ACCESS_READ
else:
acc = mmap.ACCESS_WRITE
start = offset - offset % mmap.ALLOCATIONGRANULARITY
bytes -= start
array_offset = offset - start
mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start)
self = ndarray.__new__(
subtype, shape, dtype=descr, buffer=mm, offset=array_offset, order=order
)
self._mmap = mm
self.offset = offset
self.mode = mode
if is_pathlib_path(filename):
# special case - if we were constructed with a pathlib.path,
# then filename is a path object, not a string
self.filename = filename.resolve()
elif hasattr(fid, "name") and isinstance(fid.name, basestring):
# py3 returns int for TemporaryFile().name
self.filename = os.path.abspath(fid.name)
# same as memmap copies (e.g. memmap + 1)
else:
self.filename = None
return self
|
https://github.com/numpy/numpy/issues/12653
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.5/dist-packages/numpy/core/memmap.py", line 250, in __new__
fid.seek(bytes - 1, 0)
OSError: [Errno 22] Invalid argument
|
OSError
|
def _exec_command(command, use_shell=None, use_tee=None, **env):
log.debug("_exec_command(...)")
if use_shell is None:
use_shell = os.name == "posix"
if use_tee is None:
use_tee = os.name == "posix"
using_command = 0
if use_shell:
# We use shell (unless use_shell==0) so that wildcards can be
# used.
sh = os.environ.get("SHELL", "/bin/sh")
if is_sequence(command):
argv = [sh, "-c", " ".join(list(command))]
else:
argv = [sh, "-c", command]
else:
# On NT, DOS we avoid using command.com as it's exit status is
# not related to the exit status of a command.
if is_sequence(command):
argv = command[:]
else:
argv = shlex.split(command)
# `spawn*p` family with path (vp, vpe, ...) are not available on windows.
# Also prefer spawn{v,vp} in favor of spawn{ve,vpe} if no env
# modification is actually requested as the *e* functions are not thread
# safe on windows (https://bugs.python.org/issue6476)
if hasattr(os, "spawnvpe"):
spawn_command = os.spawnvpe if env else os.spawnvp
else:
spawn_command = os.spawnve if env else os.spawnv
argv[0] = find_executable(argv[0]) or argv[0]
if not os.path.isfile(argv[0]):
log.warn("Executable %s does not exist" % (argv[0]))
if os.name in ["nt", "dos"]:
# argv[0] might be internal command
argv = [os.environ["COMSPEC"], "/C"] + argv
using_command = 1
_so_has_fileno = _supports_fileno(sys.stdout)
_se_has_fileno = _supports_fileno(sys.stderr)
so_flush = sys.stdout.flush
se_flush = sys.stderr.flush
if _so_has_fileno:
so_fileno = sys.stdout.fileno()
so_dup = os.dup(so_fileno)
if _se_has_fileno:
se_fileno = sys.stderr.fileno()
se_dup = os.dup(se_fileno)
outfile = temp_file_name()
fout = open(outfile, "w")
if using_command:
errfile = temp_file_name()
ferr = open(errfile, "w")
log.debug(
"Running %s(%s,%r,%r,os.environ)"
% (spawn_command.__name__, os.P_WAIT, argv[0], argv)
)
if env and sys.version_info[0] >= 3 and os.name == "nt":
# Pre-encode os.environ, discarding un-encodable entries,
# to avoid it failing during encoding as part of spawn. Failure
# is possible if the environment contains entries that are not
# encoded using the system codepage as windows expects.
#
# This is not necessary on unix, where os.environ is encoded
# using the surrogateescape error handler and decoded using
# it as part of spawn.
encoded_environ = {}
for k, v in os.environ.items():
try:
encoded_environ[k.encode(sys.getfilesystemencoding())] = v.encode(
sys.getfilesystemencoding()
)
except UnicodeEncodeError:
log.debug("ignoring un-encodable env entry %s", k)
else:
encoded_environ = os.environ
argv0 = argv[0]
if not using_command:
argv[0] = quote_arg(argv0)
so_flush()
se_flush()
if _so_has_fileno:
os.dup2(fout.fileno(), so_fileno)
if _se_has_fileno:
if using_command:
# XXX: disabled for now as it does not work from cmd under win32.
# Tests fail on msys
os.dup2(ferr.fileno(), se_fileno)
else:
os.dup2(fout.fileno(), se_fileno)
try:
# Use spawnv in favor of spawnve, unless necessary
if env:
status = spawn_command(os.P_WAIT, argv0, argv, encoded_environ)
else:
status = spawn_command(os.P_WAIT, argv0, argv)
except Exception:
errmess = str(get_exception())
status = 999
sys.stderr.write("%s: %s" % (errmess, argv[0]))
so_flush()
se_flush()
if _so_has_fileno:
os.dup2(so_dup, so_fileno)
os.close(so_dup)
if _se_has_fileno:
os.dup2(se_dup, se_fileno)
os.close(se_dup)
fout.close()
fout = open_latin1(outfile, "r")
text = fout.read()
fout.close()
os.remove(outfile)
if using_command:
ferr.close()
ferr = open_latin1(errfile, "r")
errmess = ferr.read()
ferr.close()
os.remove(errfile)
if errmess and not status:
# Not sure how to handle the case where errmess
# contains only warning messages and that should
# not be treated as errors.
# status = 998
if text:
text = text + "\n"
# text = '%sCOMMAND %r FAILED: %s' %(text,command,errmess)
text = text + errmess
print(errmess)
if text[-1:] == "\n":
text = text[:-1]
if status is None:
status = 0
if use_tee:
print(text)
return status, text
|
def _exec_command(command, use_shell=None, use_tee=None, **env):
log.debug("_exec_command(...)")
if use_shell is None:
use_shell = os.name == "posix"
if use_tee is None:
use_tee = os.name == "posix"
using_command = 0
if use_shell:
# We use shell (unless use_shell==0) so that wildcards can be
# used.
sh = os.environ.get("SHELL", "/bin/sh")
if is_sequence(command):
argv = [sh, "-c", " ".join(list(command))]
else:
argv = [sh, "-c", command]
else:
# On NT, DOS we avoid using command.com as it's exit status is
# not related to the exit status of a command.
if is_sequence(command):
argv = command[:]
else:
argv = shlex.split(command)
if hasattr(os, "spawnvpe"):
spawn_command = os.spawnvpe
else:
spawn_command = os.spawnve
argv[0] = find_executable(argv[0]) or argv[0]
if not os.path.isfile(argv[0]):
log.warn("Executable %s does not exist" % (argv[0]))
if os.name in ["nt", "dos"]:
# argv[0] might be internal command
argv = [os.environ["COMSPEC"], "/C"] + argv
using_command = 1
_so_has_fileno = _supports_fileno(sys.stdout)
_se_has_fileno = _supports_fileno(sys.stderr)
so_flush = sys.stdout.flush
se_flush = sys.stderr.flush
if _so_has_fileno:
so_fileno = sys.stdout.fileno()
so_dup = os.dup(so_fileno)
if _se_has_fileno:
se_fileno = sys.stderr.fileno()
se_dup = os.dup(se_fileno)
outfile = temp_file_name()
fout = open(outfile, "w")
if using_command:
errfile = temp_file_name()
ferr = open(errfile, "w")
log.debug(
"Running %s(%s,%r,%r,os.environ)"
% (spawn_command.__name__, os.P_WAIT, argv[0], argv)
)
if sys.version_info[0] >= 3 and os.name == "nt":
# Pre-encode os.environ, discarding un-encodable entries,
# to avoid it failing during encoding as part of spawn. Failure
# is possible if the environment contains entries that are not
# encoded using the system codepage as windows expects.
#
# This is not necessary on unix, where os.environ is encoded
# using the surrogateescape error handler and decoded using
# it as part of spawn.
encoded_environ = {}
for k, v in os.environ.items():
try:
encoded_environ[k.encode(sys.getfilesystemencoding())] = v.encode(
sys.getfilesystemencoding()
)
except UnicodeEncodeError:
log.debug("ignoring un-encodable env entry %s", k)
else:
encoded_environ = os.environ
argv0 = argv[0]
if not using_command:
argv[0] = quote_arg(argv0)
so_flush()
se_flush()
if _so_has_fileno:
os.dup2(fout.fileno(), so_fileno)
if _se_has_fileno:
if using_command:
# XXX: disabled for now as it does not work from cmd under win32.
# Tests fail on msys
os.dup2(ferr.fileno(), se_fileno)
else:
os.dup2(fout.fileno(), se_fileno)
try:
status = spawn_command(os.P_WAIT, argv0, argv, encoded_environ)
except Exception:
errmess = str(get_exception())
status = 999
sys.stderr.write("%s: %s" % (errmess, argv[0]))
so_flush()
se_flush()
if _so_has_fileno:
os.dup2(so_dup, so_fileno)
os.close(so_dup)
if _se_has_fileno:
os.dup2(se_dup, se_fileno)
os.close(se_dup)
fout.close()
fout = open_latin1(outfile, "r")
text = fout.read()
fout.close()
os.remove(outfile)
if using_command:
ferr.close()
ferr = open_latin1(errfile, "r")
errmess = ferr.read()
ferr.close()
os.remove(errfile)
if errmess and not status:
# Not sure how to handle the case where errmess
# contains only warning messages and that should
# not be treated as errors.
# status = 998
if text:
text = text + "\n"
# text = '%sCOMMAND %r FAILED: %s' %(text,command,errmess)
text = text + errmess
print(errmess)
if text[-1:] == "\n":
text = text[:-1]
if status is None:
status = 0
if use_tee:
print(text)
return status, text
|
https://github.com/numpy/numpy/issues/7607
|
Traceback (most recent call last):
File "\cygwin64\home\biolab\spawnve.py", line 46, in run
print(exec_command([self.executable] + self.args + [str(i)]))
File "C:\Python35-32\lib\site-packages\numpy\distutils\exec_command.py", line
235, in exec_command
use_tee=use_tee,**env)
File "C:\Python35-32\lib\site-packages\numpy\distutils\exec_command.py", line
453, in _exec_command
os.remove(outfile)
PermissionError: [WinError 32] The process cannot access the file because it is
being used by another process: 'C:\\Users\\biolab\\AppData\\Local\\Temp\\tmpx50_
dmz1\\9sq2lgq8'
|
PermissionError
|
def _ctype_ndarray(element_type, shape):
"""Create an ndarray of the given element type and shape"""
for dim in shape[::-1]:
element_type = dim * element_type
# prevent the type name include np.ctypeslib
element_type.__module__ = None
return element_type
|
def _ctype_ndarray(element_type, shape):
"""Create an ndarray of the given element type and shape"""
for dim in shape[::-1]:
element_type = element_type * dim
return element_type
|
https://github.com/numpy/numpy/issues/6176
|
KeyError Traceback (most recent call last)
<ipython-input-3-e5712fdde6f3> in <module>()
----> 1 b = np.ctypeslib.as_ctypes(a)
/home/chug/.local/lib/python2.7/site-packages/numpy/ctypeslib.py in as_ctypes(obj)
419 if readonly:
420 raise TypeError("readonly arrays unsupported")
--> 421 tp = _typecodes[ai["typestr"]]
422 for dim in ai["shape"][::-1]:
423 tp = tp * dim
KeyError: '|b1'
|
KeyError
|
def as_ctypes(obj):
"""Create and return a ctypes object from a numpy array. Actually
anything that exposes the __array_interface__ is accepted."""
ai = obj.__array_interface__
if ai["strides"]:
raise TypeError("strided arrays not supported")
if ai["version"] != 3:
raise TypeError("only __array_interface__ version 3 supported")
addr, readonly = ai["data"]
if readonly:
raise TypeError("readonly arrays unsupported")
dtype = _dtype((ai["typestr"], ai["shape"]))
result = as_ctypes_type(dtype).from_address(addr)
result.__keep = obj
return result
|
def as_ctypes(obj):
"""Create and return a ctypes object from a numpy array. Actually
anything that exposes the __array_interface__ is accepted."""
ai = obj.__array_interface__
if ai["strides"]:
raise TypeError("strided arrays not supported")
if ai["version"] != 3:
raise TypeError("only __array_interface__ version 3 supported")
addr, readonly = ai["data"]
if readonly:
raise TypeError("readonly arrays unsupported")
tp = _ctype_ndarray(_typecodes[ai["typestr"]], ai["shape"])
result = tp.from_address(addr)
result.__keep = obj
return result
|
https://github.com/numpy/numpy/issues/6176
|
KeyError Traceback (most recent call last)
<ipython-input-3-e5712fdde6f3> in <module>()
----> 1 b = np.ctypeslib.as_ctypes(a)
/home/chug/.local/lib/python2.7/site-packages/numpy/ctypeslib.py in as_ctypes(obj)
419 if readonly:
420 raise TypeError("readonly arrays unsupported")
--> 421 tp = _typecodes[ai["typestr"]]
422 for dim in ai["shape"][::-1]:
423 tp = tp * dim
KeyError: '|b1'
|
KeyError
|
def _read_array_header(fp, version):
"""
see read_array_header_1_0
"""
# Read an unsigned, little-endian short int which has the length of the
# header.
import struct
if version == (1, 0):
hlength_type = "<H"
elif version == (2, 0):
hlength_type = "<I"
else:
raise ValueError("Invalid version %r" % version)
hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length")
header_length = struct.unpack(hlength_type, hlength_str)[0]
header = _read_bytes(fp, header_length, "array header")
# The header is a pretty-printed string representation of a literal
# Python dictionary with trailing newlines padded to a ARRAY_ALIGN byte
# boundary. The keys are strings.
# "shape" : tuple of int
# "fortran_order" : bool
# "descr" : dtype.descr
header = _filter_header(header)
try:
d = safe_eval(header)
except SyntaxError as e:
msg = "Cannot parse header: %r\nException: %r"
raise ValueError(msg % (header, e))
if not isinstance(d, dict):
msg = "Header is not a dictionary: %r"
raise ValueError(msg % d)
keys = sorted(d.keys())
if keys != ["descr", "fortran_order", "shape"]:
msg = "Header does not contain the correct keys: %r"
raise ValueError(msg % (keys,))
# Sanity-check the values.
if not isinstance(d["shape"], tuple) or not numpy.all(
[isinstance(x, (int, long)) for x in d["shape"]]
):
msg = "shape is not valid: %r"
raise ValueError(msg % (d["shape"],))
if not isinstance(d["fortran_order"], bool):
msg = "fortran_order is not a valid bool: %r"
raise ValueError(msg % (d["fortran_order"],))
try:
dtype = descr_to_dtype(d["descr"])
except TypeError as e:
msg = "descr is not a valid dtype descriptor: %r"
raise ValueError(msg % (d["descr"],))
return d["shape"], d["fortran_order"], dtype
|
def _read_array_header(fp, version):
"""
see read_array_header_1_0
"""
# Read an unsigned, little-endian short int which has the length of the
# header.
import struct
if version == (1, 0):
hlength_type = "<H"
elif version == (2, 0):
hlength_type = "<I"
else:
raise ValueError("Invalid version %r" % version)
hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length")
header_length = struct.unpack(hlength_type, hlength_str)[0]
header = _read_bytes(fp, header_length, "array header")
# The header is a pretty-printed string representation of a literal
# Python dictionary with trailing newlines padded to a ARRAY_ALIGN byte
# boundary. The keys are strings.
# "shape" : tuple of int
# "fortran_order" : bool
# "descr" : dtype.descr
header = _filter_header(header)
try:
d = safe_eval(header)
except SyntaxError as e:
msg = "Cannot parse header: %r\nException: %r"
raise ValueError(msg % (header, e))
if not isinstance(d, dict):
msg = "Header is not a dictionary: %r"
raise ValueError(msg % d)
keys = sorted(d.keys())
if keys != ["descr", "fortran_order", "shape"]:
msg = "Header does not contain the correct keys: %r"
raise ValueError(msg % (keys,))
# Sanity-check the values.
if not isinstance(d["shape"], tuple) or not numpy.all(
[isinstance(x, (int, long)) for x in d["shape"]]
):
msg = "shape is not valid: %r"
raise ValueError(msg % (d["shape"],))
if not isinstance(d["fortran_order"], bool):
msg = "fortran_order is not a valid bool: %r"
raise ValueError(msg % (d["fortran_order"],))
try:
dtype = numpy.dtype(d["descr"])
except TypeError as e:
msg = "descr is not a valid dtype descriptor: %r"
raise ValueError(msg % (d["descr"],))
return d["shape"], d["fortran_order"], dtype
|
https://github.com/numpy/numpy/issues/2215
|
import numpy as np
t = np.dtype('i1, i4, i1', align=True)
d = np.zeros(1, t)
np.save("test.npy", d)
data = np.load("test.npy")
Traceback (most recent call last):
File "D:\Projects\Cuda\Cuda_Git\pathwise\liinc\model\feeds\numpy_bug.py", line 8, in <module>
data = np.load("test.npy")
File "D:\Projects\Cuda\Cuda_Git\pathwise\pathwise\vendors\lib64\x64\python\numpy\lib\npyio.py", line 314, in load
return format.read_array(fid)
File "D:\Projects\Cuda\Cuda_Git\pathwise\pathwise\vendors\lib64\x64\python\numpy\lib\format.py", line 440, in read_array
shape, fortran_order, dtype = read_array_header_1_0(fp)
File "D:\Projects\Cuda\Cuda_Git\pathwise\pathwise\vendors\lib64\x64\python\numpy\lib\format.py", line 358, in read_array_header_1_0
dtype = numpy.dtype(d['descr'])
ValueError: two fields with the same name
|
ValueError
|
def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
"""
Return the string representation of an array.
Parameters
----------
arr : ndarray
Input array.
max_line_width : int, optional
The maximum number of columns the string should span. Newline
characters split the string appropriately after array elements.
precision : int, optional
Floating point precision. Default is the current printing precision
(usually 8), which can be altered using `set_printoptions`.
suppress_small : bool, optional
Represent very small numbers as zero, default is False. Very small
is defined by `precision`, if the precision is 8 then
numbers smaller than 5e-9 are represented as zero.
Returns
-------
string : str
The string representation of an array.
See Also
--------
array_str, array2string, set_printoptions
Examples
--------
>>> np.array_repr(np.array([1,2]))
'array([1, 2])'
>>> np.array_repr(np.ma.array([0.]))
'MaskedArray([ 0.])'
>>> np.array_repr(np.array([], np.int32))
'array([], dtype=int32)'
>>> x = np.array([1e-6, 4e-7, 2, 3])
>>> np.array_repr(x, precision=6, suppress_small=True)
'array([ 0.000001, 0. , 2. , 3. ])'
"""
return _array_repr_implementation(arr, max_line_width, precision, suppress_small)
|
def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
"""
Return the string representation of an array.
Parameters
----------
arr : ndarray
Input array.
max_line_width : int, optional
The maximum number of columns the string should span. Newline
characters split the string appropriately after array elements.
precision : int, optional
Floating point precision. Default is the current printing precision
(usually 8), which can be altered using `set_printoptions`.
suppress_small : bool, optional
Represent very small numbers as zero, default is False. Very small
is defined by `precision`, if the precision is 8 then
numbers smaller than 5e-9 are represented as zero.
Returns
-------
string : str
The string representation of an array.
See Also
--------
array_str, array2string, set_printoptions
Examples
--------
>>> np.array_repr(np.array([1,2]))
'array([1, 2])'
>>> np.array_repr(np.ma.array([0.]))
'MaskedArray([ 0.])'
>>> np.array_repr(np.array([], np.int32))
'array([], dtype=int32)'
>>> x = np.array([1e-6, 4e-7, 2, 3])
>>> np.array_repr(x, precision=6, suppress_small=True)
'array([ 0.000001, 0. , 2. , 3. ])'
"""
if max_line_width is None:
max_line_width = _format_options["linewidth"]
if type(arr) is not ndarray:
class_name = type(arr).__name__
else:
class_name = "array"
skipdtype = dtype_is_implied(arr.dtype) and arr.size > 0
prefix = class_name + "("
suffix = ")" if skipdtype else ","
if _format_options["legacy"] == "1.13" and arr.shape == () and not arr.dtype.names:
lst = repr(arr.item())
elif arr.size > 0 or arr.shape == (0,):
lst = array2string(
arr, max_line_width, precision, suppress_small, ", ", prefix, suffix=suffix
)
else: # show zero-length shape unless it is (0,)
lst = "[], shape=%s" % (repr(arr.shape),)
arr_str = prefix + lst + suffix
if skipdtype:
return arr_str
dtype_str = "dtype={})".format(dtype_short_repr(arr.dtype))
# compute whether we should put dtype on a new line: Do so if adding the
# dtype would extend the last line past max_line_width.
# Note: This line gives the correct result even when rfind returns -1.
last_line_len = len(arr_str) - (arr_str.rfind("\n") + 1)
spacer = " "
if _format_options["legacy"] == "1.13":
if issubclass(arr.dtype.type, flexible):
spacer = "\n" + " " * len(class_name + "(")
elif last_line_len + len(dtype_str) + 1 > max_line_width:
spacer = "\n" + " " * len(class_name + "(")
return arr_str + spacer + dtype_str
|
https://github.com/numpy/numpy/issues/12162
|
In [1]: import numpy as np
In [2]: class Sub(np.ndarray):
...: def __array_function__(*args, **kwargs):
...: return NotImplemented
...:
In [3]: repr(np.array(1).view(Sub))
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-6-d6ff958f4fcf> in <module>()
----> 1 repr(np.array(1).view(Sub))
~/dev/numpy/numpy/core/overrides.py in public_api(*args, **kwargs)
149 relevant_args = dispatcher(*args, **kwargs)
150 return array_function_implementation_or_override(
--> 151 implementation, public_api, relevant_args, args, kwargs)
152 return public_api
153
~/dev/numpy/numpy/core/overrides.py in array_function_implementation_or_override(implementation, public_api, relevant_args, args, kwargs)
108 raise TypeError('no implementation found for {} on types that implement '
109 '__array_function__: {}'
--> 110 .format(public_api, list(map(type, overloaded_args))))
111
112
TypeError: no implementation found for <function array_repr at 0x105692e18> on types that implement __array_function__: [<class '__main__.Sub'>]
|
TypeError
|
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
"""
Return a string representation of the data in an array.
The data in the array is returned as a single string. This function is
similar to `array_repr`, the difference being that `array_repr` also
returns information on the kind of array and its data type.
Parameters
----------
a : ndarray
Input array.
max_line_width : int, optional
Inserts newlines if text is longer than `max_line_width`. The
default is, indirectly, 75.
precision : int, optional
Floating point precision. Default is the current printing precision
(usually 8), which can be altered using `set_printoptions`.
suppress_small : bool, optional
Represent numbers "very close" to zero as zero; default is False.
Very close is defined by precision: if the precision is 8, e.g.,
numbers smaller (in absolute value) than 5e-9 are represented as
zero.
See Also
--------
array2string, array_repr, set_printoptions
Examples
--------
>>> np.array_str(np.arange(3))
'[0 1 2]'
"""
return _array_str_implementation(a, max_line_width, precision, suppress_small)
|
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
"""
Return a string representation of the data in an array.
The data in the array is returned as a single string. This function is
similar to `array_repr`, the difference being that `array_repr` also
returns information on the kind of array and its data type.
Parameters
----------
a : ndarray
Input array.
max_line_width : int, optional
Inserts newlines if text is longer than `max_line_width`. The
default is, indirectly, 75.
precision : int, optional
Floating point precision. Default is the current printing precision
(usually 8), which can be altered using `set_printoptions`.
suppress_small : bool, optional
Represent numbers "very close" to zero as zero; default is False.
Very close is defined by precision: if the precision is 8, e.g.,
numbers smaller (in absolute value) than 5e-9 are represented as
zero.
See Also
--------
array2string, array_repr, set_printoptions
Examples
--------
>>> np.array_str(np.arange(3))
'[0 1 2]'
"""
if _format_options["legacy"] == "1.13" and a.shape == () and not a.dtype.names:
return str(a.item())
# the str of 0d arrays is a special case: It should appear like a scalar,
# so floats are not truncated by `precision`, and strings are not wrapped
# in quotes. So we return the str of the scalar value.
if a.shape == ():
# obtain a scalar and call str on it, avoiding problems for subclasses
# for which indexing with () returns a 0d instead of a scalar by using
# ndarray's getindex. Also guard against recursive 0d object arrays.
return _guarded_str(np.ndarray.__getitem__(a, ()))
return array2string(a, max_line_width, precision, suppress_small, " ", "")
|
https://github.com/numpy/numpy/issues/12162
|
In [1]: import numpy as np
In [2]: class Sub(np.ndarray):
...: def __array_function__(*args, **kwargs):
...: return NotImplemented
...:
In [3]: repr(np.array(1).view(Sub))
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-6-d6ff958f4fcf> in <module>()
----> 1 repr(np.array(1).view(Sub))
~/dev/numpy/numpy/core/overrides.py in public_api(*args, **kwargs)
149 relevant_args = dispatcher(*args, **kwargs)
150 return array_function_implementation_or_override(
--> 151 implementation, public_api, relevant_args, args, kwargs)
152 return public_api
153
~/dev/numpy/numpy/core/overrides.py in array_function_implementation_or_override(implementation, public_api, relevant_args, args, kwargs)
108 raise TypeError('no implementation found for {} on types that implement '
109 '__array_function__: {}'
--> 110 .format(public_api, list(map(type, overloaded_args))))
111
112
TypeError: no implementation found for <function array_repr at 0x105692e18> on types that implement __array_function__: [<class '__main__.Sub'>]
|
TypeError
|
def set_string_function(f, repr=True):
"""
Set a Python function to be used when pretty printing arrays.
Parameters
----------
f : function or None
Function to be used to pretty print arrays. The function should expect
a single array argument and return a string of the representation of
the array. If None, the function is reset to the default NumPy function
to print arrays.
repr : bool, optional
If True (default), the function for pretty printing (``__repr__``)
is set, if False the function that returns the default string
representation (``__str__``) is set.
See Also
--------
set_printoptions, get_printoptions
Examples
--------
>>> def pprint(arr):
... return 'HA! - What are you going to do now?'
...
>>> np.set_string_function(pprint)
>>> a = np.arange(10)
>>> a
HA! - What are you going to do now?
>>> print(a)
[0 1 2 3 4 5 6 7 8 9]
We can reset the function to the default:
>>> np.set_string_function(None)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
`repr` affects either pretty printing or normal string representation.
Note that ``__repr__`` is still affected by setting ``__str__``
because the width of each array element in the returned string becomes
equal to the length of the result of ``__str__()``.
>>> x = np.arange(4)
>>> np.set_string_function(lambda x:'random', repr=False)
>>> x.__str__()
'random'
>>> x.__repr__()
'array([ 0, 1, 2, 3])'
"""
if f is None:
if repr:
return multiarray.set_string_function(_default_array_repr, 1)
else:
return multiarray.set_string_function(_default_array_str, 0)
else:
return multiarray.set_string_function(f, repr)
|
def set_string_function(f, repr=True):
"""
Set a Python function to be used when pretty printing arrays.
Parameters
----------
f : function or None
Function to be used to pretty print arrays. The function should expect
a single array argument and return a string of the representation of
the array. If None, the function is reset to the default NumPy function
to print arrays.
repr : bool, optional
If True (default), the function for pretty printing (``__repr__``)
is set, if False the function that returns the default string
representation (``__str__``) is set.
See Also
--------
set_printoptions, get_printoptions
Examples
--------
>>> def pprint(arr):
... return 'HA! - What are you going to do now?'
...
>>> np.set_string_function(pprint)
>>> a = np.arange(10)
>>> a
HA! - What are you going to do now?
>>> print(a)
[0 1 2 3 4 5 6 7 8 9]
We can reset the function to the default:
>>> np.set_string_function(None)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
`repr` affects either pretty printing or normal string representation.
Note that ``__repr__`` is still affected by setting ``__str__``
because the width of each array element in the returned string becomes
equal to the length of the result of ``__str__()``.
>>> x = np.arange(4)
>>> np.set_string_function(lambda x:'random', repr=False)
>>> x.__str__()
'random'
>>> x.__repr__()
'array([ 0, 1, 2, 3])'
"""
if f is None:
if repr:
return multiarray.set_string_function(array_repr, 1)
else:
return multiarray.set_string_function(array_str, 0)
else:
return multiarray.set_string_function(f, repr)
|
https://github.com/numpy/numpy/issues/12162
|
In [1]: import numpy as np
In [2]: class Sub(np.ndarray):
...: def __array_function__(*args, **kwargs):
...: return NotImplemented
...:
In [3]: repr(np.array(1).view(Sub))
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-6-d6ff958f4fcf> in <module>()
----> 1 repr(np.array(1).view(Sub))
~/dev/numpy/numpy/core/overrides.py in public_api(*args, **kwargs)
149 relevant_args = dispatcher(*args, **kwargs)
150 return array_function_implementation_or_override(
--> 151 implementation, public_api, relevant_args, args, kwargs)
152 return public_api
153
~/dev/numpy/numpy/core/overrides.py in array_function_implementation_or_override(implementation, public_api, relevant_args, args, kwargs)
108 raise TypeError('no implementation found for {} on types that implement '
109 '__array_function__: {}'
--> 110 .format(public_api, list(map(type, overloaded_args))))
111
112
TypeError: no implementation found for <function array_repr at 0x105692e18> on types that implement __array_function__: [<class '__main__.Sub'>]
|
TypeError
|
def array_function_dispatch(dispatcher, module=None, verify=True):
"""Decorator for adding dispatch with the __array_function__ protocol."""
def decorator(implementation):
# TODO: only do this check when the appropriate flag is enabled or for
# a dev install. We want this check for testing but don't want to
# slow down all numpy imports.
if verify:
verify_matching_signatures(implementation, dispatcher)
@functools.wraps(implementation)
def public_api(*args, **kwargs):
relevant_args = dispatcher(*args, **kwargs)
return array_function_implementation_or_override(
implementation, public_api, relevant_args, args, kwargs
)
if module is not None:
public_api.__module__ = module
# TODO: remove this when we drop Python 2 support (functools.wraps
# adds __wrapped__ automatically in later versions)
public_api.__wrapped__ = implementation
return public_api
return decorator
|
def array_function_dispatch(dispatcher, module=None, verify=True):
"""Decorator for adding dispatch with the __array_function__ protocol."""
def decorator(implementation):
# TODO: only do this check when the appropriate flag is enabled or for
# a dev install. We want this check for testing but don't want to
# slow down all numpy imports.
if verify:
verify_matching_signatures(implementation, dispatcher)
@functools.wraps(implementation)
def public_api(*args, **kwargs):
relevant_args = dispatcher(*args, **kwargs)
return array_function_implementation_or_override(
implementation, public_api, relevant_args, args, kwargs
)
if module is not None:
public_api.__module__ = module
return public_api
return decorator
|
https://github.com/numpy/numpy/issues/12162
|
In [1]: import numpy as np
In [2]: class Sub(np.ndarray):
...: def __array_function__(*args, **kwargs):
...: return NotImplemented
...:
In [3]: repr(np.array(1).view(Sub))
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-6-d6ff958f4fcf> in <module>()
----> 1 repr(np.array(1).view(Sub))
~/dev/numpy/numpy/core/overrides.py in public_api(*args, **kwargs)
149 relevant_args = dispatcher(*args, **kwargs)
150 return array_function_implementation_or_override(
--> 151 implementation, public_api, relevant_args, args, kwargs)
152 return public_api
153
~/dev/numpy/numpy/core/overrides.py in array_function_implementation_or_override(implementation, public_api, relevant_args, args, kwargs)
108 raise TypeError('no implementation found for {} on types that implement '
109 '__array_function__: {}'
--> 110 .format(public_api, list(map(type, overloaded_args))))
111
112
TypeError: no implementation found for <function array_repr at 0x105692e18> on types that implement __array_function__: [<class '__main__.Sub'>]
|
TypeError
|
def decorator(implementation):
# TODO: only do this check when the appropriate flag is enabled or for
# a dev install. We want this check for testing but don't want to
# slow down all numpy imports.
if verify:
verify_matching_signatures(implementation, dispatcher)
@functools.wraps(implementation)
def public_api(*args, **kwargs):
relevant_args = dispatcher(*args, **kwargs)
return array_function_implementation_or_override(
implementation, public_api, relevant_args, args, kwargs
)
if module is not None:
public_api.__module__ = module
# TODO: remove this when we drop Python 2 support (functools.wraps
# adds __wrapped__ automatically in later versions)
public_api.__wrapped__ = implementation
return public_api
|
def decorator(implementation):
# TODO: only do this check when the appropriate flag is enabled or for
# a dev install. We want this check for testing but don't want to
# slow down all numpy imports.
if verify:
verify_matching_signatures(implementation, dispatcher)
@functools.wraps(implementation)
def public_api(*args, **kwargs):
relevant_args = dispatcher(*args, **kwargs)
return array_function_implementation_or_override(
implementation, public_api, relevant_args, args, kwargs
)
if module is not None:
public_api.__module__ = module
return public_api
|
https://github.com/numpy/numpy/issues/12162
|
In [1]: import numpy as np
In [2]: class Sub(np.ndarray):
...: def __array_function__(*args, **kwargs):
...: return NotImplemented
...:
In [3]: repr(np.array(1).view(Sub))
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-6-d6ff958f4fcf> in <module>()
----> 1 repr(np.array(1).view(Sub))
~/dev/numpy/numpy/core/overrides.py in public_api(*args, **kwargs)
149 relevant_args = dispatcher(*args, **kwargs)
150 return array_function_implementation_or_override(
--> 151 implementation, public_api, relevant_args, args, kwargs)
152 return public_api
153
~/dev/numpy/numpy/core/overrides.py in array_function_implementation_or_override(implementation, public_api, relevant_args, args, kwargs)
108 raise TypeError('no implementation found for {} on types that implement '
109 '__array_function__: {}'
--> 110 .format(public_api, list(map(type, overloaded_args))))
111
112
TypeError: no implementation found for <function array_repr at 0x105692e18> on types that implement __array_function__: [<class '__main__.Sub'>]
|
TypeError
|
def _check_fill_value(fill_value, ndtype):
"""
Private function validating the given `fill_value` for the given dtype.
If fill_value is None, it is set to the default corresponding to the dtype.
If fill_value is not None, its value is forced to the given dtype.
The result is always a 0d array.
"""
ndtype = np.dtype(ndtype)
fields = ndtype.fields
if fill_value is None:
fill_value = default_fill_value(ndtype)
elif fields:
fdtype = [(_[0], _[1]) for _ in ndtype.descr]
if isinstance(fill_value, (ndarray, np.void)):
try:
fill_value = np.array(fill_value, copy=False, dtype=fdtype)
except ValueError:
err_msg = "Unable to transform %s to dtype %s"
raise ValueError(err_msg % (fill_value, fdtype))
else:
fill_value = np.asarray(fill_value, dtype=object)
fill_value = np.array(
_recursive_set_fill_value(fill_value, ndtype), dtype=ndtype
)
else:
if isinstance(fill_value, basestring) and (ndtype.char not in "OSVU"):
# Note this check doesn't work if fill_value is not a scalar
err_msg = "Cannot set fill value of string with array of dtype %s"
raise TypeError(err_msg % ndtype)
else:
# In case we want to convert 1e20 to int.
# Also in case of converting string arrays.
try:
fill_value = np.array(fill_value, copy=False, dtype=ndtype)
except (OverflowError, ValueError):
# Raise TypeError instead of OverflowError or ValueError.
# OverflowError is seldom used, and the real problem here is
# that the passed fill_value is not compatible with the ndtype.
err_msg = "Cannot convert fill_value %s to dtype %s"
raise TypeError(err_msg % (fill_value, ndtype))
return np.array(fill_value)
|
def _check_fill_value(fill_value, ndtype):
"""
Private function validating the given `fill_value` for the given dtype.
If fill_value is None, it is set to the default corresponding to the dtype.
If fill_value is not None, its value is forced to the given dtype.
The result is always a 0d array.
"""
ndtype = np.dtype(ndtype)
fields = ndtype.fields
if fill_value is None:
fill_value = default_fill_value(ndtype)
elif fields:
fdtype = [(_[0], _[1]) for _ in ndtype.descr]
if isinstance(fill_value, (ndarray, np.void)):
try:
fill_value = np.array(fill_value, copy=False, dtype=fdtype)
except ValueError:
err_msg = "Unable to transform %s to dtype %s"
raise ValueError(err_msg % (fill_value, fdtype))
else:
fill_value = np.asarray(fill_value, dtype=object)
fill_value = np.array(
_recursive_set_fill_value(fill_value, ndtype), dtype=ndtype
)
else:
if isinstance(fill_value, basestring) and (ndtype.char not in "OSVU"):
err_msg = "Cannot set fill value of string with array of dtype %s"
raise TypeError(err_msg % ndtype)
else:
# In case we want to convert 1e20 to int.
try:
fill_value = np.array(fill_value, copy=False, dtype=ndtype)
except OverflowError:
# Raise TypeError instead of OverflowError. OverflowError
# is seldom used, and the real problem here is that the
# passed fill_value is not compatible with the ndtype.
err_msg = "Fill value %s overflows dtype %s"
raise TypeError(err_msg % (fill_value, ndtype))
return np.array(fill_value)
|
https://github.com/numpy/numpy/issues/12248
|
(np.ma.array(['A', 'B', 'C'], fill_value='N') == 'A') | (np.ma.array(['A', 'B', 'C'], fill_value='N') == 'A')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/tom/miniconda3/envs/np1153/lib/python3.6/site-packages/numpy/ma/core.py", line 3016, in __array_finalize__
self._fill_value = _check_fill_value(self._fill_value, self.dtype)
File "/Users/tom/miniconda3/envs/np1153/lib/python3.6/site-packages/numpy/ma/core.py", line 476, in _check_fill_value
fill_value = np.array(fill_value, copy=False, dtype=ndtype)
ValueError: invalid literal for int() with base 10: 'N'
|
ValueError
|
def _comparison(self, other, compare):
"""Compare self with other using operator.eq or operator.ne.
When either of the elements is masked, the result is masked as well,
but the underlying boolean data are still set, with self and other
considered equal if both are masked, and unequal otherwise.
For structured arrays, all fields are combined, with masked values
ignored. The result is masked if all fields were masked, with self
and other considered equal only if both were fully masked.
"""
omask = getmask(other)
smask = self.mask
mask = mask_or(smask, omask, copy=True)
odata = getdata(other)
if mask.dtype.names is not None:
# For possibly masked structured arrays we need to be careful,
# since the standard structured array comparison will use all
# fields, masked or not. To avoid masked fields influencing the
# outcome, we set all masked fields in self to other, so they'll
# count as equal. To prepare, we ensure we have the right shape.
broadcast_shape = np.broadcast(self, odata).shape
sbroadcast = np.broadcast_to(self, broadcast_shape, subok=True)
sbroadcast._mask = mask
sdata = sbroadcast.filled(odata)
# Now take care of the mask; the merged mask should have an item
# masked if all fields were masked (in one and/or other).
mask = mask == np.ones((), mask.dtype)
else:
# For regular arrays, just use the data as they come.
sdata = self.data
check = compare(sdata, odata)
if isinstance(check, (np.bool_, bool)):
return masked if mask else check
if mask is not nomask:
# Adjust elements that were masked, which should be treated
# as equal if masked in both, unequal if masked in one.
# Note that this works automatically for structured arrays too.
check = np.where(mask, compare(smask, omask), check)
if mask.shape != check.shape:
# Guarantee consistency of the shape, making a copy since the
# the mask may need to get written to later.
mask = np.broadcast_to(mask, check.shape).copy()
check = check.view(type(self))
check._update_from(self)
check._mask = mask
# Cast fill value to bool_ if needed. If it cannot be cast, the
# default boolean fill value is used.
if check._fill_value is not None:
try:
fill = _check_fill_value(check._fill_value, np.bool_)
except (TypeError, ValueError):
fill = _check_fill_value(None, np.bool_)
check._fill_value = fill
return check
|
def _comparison(self, other, compare):
"""Compare self with other using operator.eq or operator.ne.
When either of the elements is masked, the result is masked as well,
but the underlying boolean data are still set, with self and other
considered equal if both are masked, and unequal otherwise.
For structured arrays, all fields are combined, with masked values
ignored. The result is masked if all fields were masked, with self
and other considered equal only if both were fully masked.
"""
omask = getmask(other)
smask = self.mask
mask = mask_or(smask, omask, copy=True)
odata = getdata(other)
if mask.dtype.names is not None:
# For possibly masked structured arrays we need to be careful,
# since the standard structured array comparison will use all
# fields, masked or not. To avoid masked fields influencing the
# outcome, we set all masked fields in self to other, so they'll
# count as equal. To prepare, we ensure we have the right shape.
broadcast_shape = np.broadcast(self, odata).shape
sbroadcast = np.broadcast_to(self, broadcast_shape, subok=True)
sbroadcast._mask = mask
sdata = sbroadcast.filled(odata)
# Now take care of the mask; the merged mask should have an item
# masked if all fields were masked (in one and/or other).
mask = mask == np.ones((), mask.dtype)
else:
# For regular arrays, just use the data as they come.
sdata = self.data
check = compare(sdata, odata)
if isinstance(check, (np.bool_, bool)):
return masked if mask else check
if mask is not nomask:
# Adjust elements that were masked, which should be treated
# as equal if masked in both, unequal if masked in one.
# Note that this works automatically for structured arrays too.
check = np.where(mask, compare(smask, omask), check)
if mask.shape != check.shape:
# Guarantee consistency of the shape, making a copy since the
# the mask may need to get written to later.
mask = np.broadcast_to(mask, check.shape).copy()
check = check.view(type(self))
check._update_from(self)
check._mask = mask
return check
|
https://github.com/numpy/numpy/issues/12248
|
(np.ma.array(['A', 'B', 'C'], fill_value='N') == 'A') | (np.ma.array(['A', 'B', 'C'], fill_value='N') == 'A')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/tom/miniconda3/envs/np1153/lib/python3.6/site-packages/numpy/ma/core.py", line 3016, in __array_finalize__
self._fill_value = _check_fill_value(self._fill_value, self.dtype)
File "/Users/tom/miniconda3/envs/np1153/lib/python3.6/site-packages/numpy/ma/core.py", line 476, in _check_fill_value
fill_value = np.array(fill_value, copy=False, dtype=ndtype)
ValueError: invalid literal for int() with base 10: 'N'
|
ValueError
|
def _comparison(self, other, compare):
"""Compare self with other using operator.eq or operator.ne.
When either of the elements is masked, the result is masked as well,
but the underlying boolean data are still set, with self and other
considered equal if both are masked, and unequal otherwise.
For structured arrays, all fields are combined, with masked values
ignored. The result is masked if all fields were masked, with self
and other considered equal only if both were fully masked.
"""
omask = getmask(other)
smask = self.mask
mask = mask_or(smask, omask, copy=True)
odata = getdata(other)
if mask.dtype.names:
# For possibly masked structured arrays we need to be careful,
# since the standard structured array comparison will use all
# fields, masked or not. To avoid masked fields influencing the
# outcome, we set all masked fields in self to other, so they'll
# count as equal. To prepare, we ensure we have the right shape.
broadcast_shape = np.broadcast(self, odata).shape
sbroadcast = np.broadcast_to(self, broadcast_shape, subok=True)
sbroadcast._mask = mask
sdata = sbroadcast.filled(odata)
# Now take care of the mask; the merged mask should have an item
# masked if all fields were masked (in one and/or other).
mask = mask == np.ones((), mask.dtype)
else:
# For regular arrays, just use the data as they come.
sdata = self.data
check = compare(sdata, odata)
if isinstance(check, (np.bool_, bool)):
return masked if mask else check
if mask is not nomask:
# Adjust elements that were masked, which should be treated
# as equal if masked in both, unequal if masked in one.
# Note that this works automatically for structured arrays too.
check = np.where(mask, compare(smask, omask), check)
if mask.shape != check.shape:
# Guarantee consistency of the shape, making a copy since the
# the mask may need to get written to later.
mask = np.broadcast_to(mask, check.shape).copy()
check = check.view(type(self))
check._update_from(self)
check._mask = mask
# Cast fill value to bool_ if needed. If it cannot be cast, the
# default boolean fill value is used.
if check._fill_value is not None:
try:
fill = _check_fill_value(check._fill_value, np.bool_)
except (TypeError, ValueError):
fill = _check_fill_value(None, np.bool_)
check._fill_value = fill
return check
|
def _comparison(self, other, compare):
"""Compare self with other using operator.eq or operator.ne.
When either of the elements is masked, the result is masked as well,
but the underlying boolean data are still set, with self and other
considered equal if both are masked, and unequal otherwise.
For structured arrays, all fields are combined, with masked values
ignored. The result is masked if all fields were masked, with self
and other considered equal only if both were fully masked.
"""
omask = getmask(other)
smask = self.mask
mask = mask_or(smask, omask, copy=True)
odata = getdata(other)
if mask.dtype.names:
# For possibly masked structured arrays we need to be careful,
# since the standard structured array comparison will use all
# fields, masked or not. To avoid masked fields influencing the
# outcome, we set all masked fields in self to other, so they'll
# count as equal. To prepare, we ensure we have the right shape.
broadcast_shape = np.broadcast(self, odata).shape
sbroadcast = np.broadcast_to(self, broadcast_shape, subok=True)
sbroadcast._mask = mask
sdata = sbroadcast.filled(odata)
# Now take care of the mask; the merged mask should have an item
# masked if all fields were masked (in one and/or other).
mask = mask == np.ones((), mask.dtype)
else:
# For regular arrays, just use the data as they come.
sdata = self.data
check = compare(sdata, odata)
if isinstance(check, (np.bool_, bool)):
return masked if mask else check
if mask is not nomask:
# Adjust elements that were masked, which should be treated
# as equal if masked in both, unequal if masked in one.
# Note that this works automatically for structured arrays too.
check = np.where(mask, compare(smask, omask), check)
if mask.shape != check.shape:
# Guarantee consistency of the shape, making a copy since the
# the mask may need to get written to later.
mask = np.broadcast_to(mask, check.shape).copy()
check = check.view(type(self))
check._update_from(self)
check._mask = mask
return check
|
https://github.com/numpy/numpy/issues/12248
|
(np.ma.array(['A', 'B', 'C'], fill_value='N') == 'A') | (np.ma.array(['A', 'B', 'C'], fill_value='N') == 'A')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/tom/miniconda3/envs/np1153/lib/python3.6/site-packages/numpy/ma/core.py", line 3016, in __array_finalize__
self._fill_value = _check_fill_value(self._fill_value, self.dtype)
File "/Users/tom/miniconda3/envs/np1153/lib/python3.6/site-packages/numpy/ma/core.py", line 476, in _check_fill_value
fill_value = np.array(fill_value, copy=False, dtype=ndtype)
ValueError: invalid literal for int() with base 10: 'N'
|
ValueError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.