after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def update(
self, update_expression, expression_attribute_names, expression_attribute_values
):
# Update subexpressions are identifiable by the operator keyword, so split on that and
# get rid of the empty leading string.
parts = [
p
for p in re.split(r"\b(SET|REMOVE|ADD|DELETE)\b", update_expression, flags=re.I)
if p
]
# make sure that we correctly found only operator/value pairs
assert len(parts) % 2 == 0, (
"Mismatched operators and values in update expression: '{}'".format(
update_expression
)
)
for action, valstr in zip(parts[:-1:2], parts[1::2]):
action = action.upper()
# "Should" retain arguments in side (...)
values = re.split(r",(?![^(]*\))", valstr)
for value in values:
# A Real value
value = value.lstrip(":").rstrip(",").strip()
for k, v in expression_attribute_names.items():
value = re.sub(r"{0}\b".format(k), v, value)
if action == "REMOVE":
key = value
attr, list_index = attribute_is_list(key.split(".")[0])
if "." not in key:
if list_index:
new_list = DynamoType(self.attrs[attr])
new_list.delete(None, list_index)
self.attrs[attr] = new_list
else:
self.attrs.pop(value, None)
else:
# Handle nested dict updates
self.attrs[attr].delete(".".join(key.split(".")[1:]))
elif action == "SET":
key, value = value.split("=", 1)
key = key.strip()
value = value.strip()
# check whether key is a list
attr, list_index = attribute_is_list(key.split(".")[0])
# If value not exists, changes value to a default if needed, else its the same as it was
value = self._get_default(value)
# If operation == list_append, get the original value and append it
value = self._get_appended_list(value, expression_attribute_values)
if type(value) != DynamoType:
if value in expression_attribute_values:
dyn_value = DynamoType(expression_attribute_values[value])
else:
dyn_value = DynamoType({"S": value})
else:
dyn_value = value
if "." in key and attr not in self.attrs:
raise ValueError # Setting nested attr not allowed if first attr does not exist yet
elif attr not in self.attrs:
self.attrs[attr] = dyn_value # set new top-level attribute
else:
self.attrs[attr].set(
".".join(key.split(".")[1:]), dyn_value, list_index
) # set value recursively
elif action == "ADD":
key, value = value.split(" ", 1)
key = key.strip()
value_str = value.strip()
if value_str in expression_attribute_values:
dyn_value = DynamoType(expression_attribute_values[value])
else:
raise TypeError
# Handle adding numbers - value gets added to existing value,
# or added to 0 if it doesn't exist yet
if dyn_value.is_number():
existing = self.attrs.get(key, DynamoType({"N": "0"}))
if not existing.same_type(dyn_value):
raise TypeError()
self.attrs[key] = DynamoType(
{
"N": str(
decimal.Decimal(existing.value)
+ decimal.Decimal(dyn_value.value)
)
}
)
# Handle adding sets - value is added to the set, or set is
# created with only this value if it doesn't exist yet
# New value must be of same set type as previous value
elif dyn_value.is_set():
existing = self.attrs.get(key, DynamoType({dyn_value.type: {}}))
if not existing.same_type(dyn_value):
raise TypeError()
new_set = set(existing.value).union(dyn_value.value)
self.attrs[key] = DynamoType({existing.type: list(new_set)})
else: # Number and Sets are the only supported types for ADD
raise TypeError
elif action == "DELETE":
key, value = value.split(" ", 1)
key = key.strip()
value_str = value.strip()
if value_str in expression_attribute_values:
dyn_value = DynamoType(expression_attribute_values[value])
else:
raise TypeError
if not dyn_value.is_set():
raise TypeError
existing = self.attrs.get(key, None)
if existing:
if not existing.same_type(dyn_value):
raise TypeError
new_set = set(existing.value).difference(dyn_value.value)
self.attrs[key] = DynamoType({existing.type: list(new_set)})
else:
raise NotImplementedError(
"{} update action not yet supported".format(action)
)
|
def update(
self, update_expression, expression_attribute_names, expression_attribute_values
):
# Update subexpressions are identifiable by the operator keyword, so split on that and
# get rid of the empty leading string.
parts = [
p
for p in re.split(r"\b(SET|REMOVE|ADD|DELETE)\b", update_expression, flags=re.I)
if p
]
# make sure that we correctly found only operator/value pairs
assert len(parts) % 2 == 0, (
"Mismatched operators and values in update expression: '{}'".format(
update_expression
)
)
for action, valstr in zip(parts[:-1:2], parts[1::2]):
action = action.upper()
# "Should" retain arguments in side (...)
values = re.split(r",(?![^(]*\))", valstr)
for value in values:
# A Real value
value = value.lstrip(":").rstrip(",").strip()
for k, v in expression_attribute_names.items():
value = re.sub(r"{0}\b".format(k), v, value)
if action == "REMOVE":
key = value
attr, list_index = attribute_is_list(key.split(".")[0])
if "." not in key:
if list_index:
new_list = DynamoType(self.attrs[attr])
new_list.delete(None, list_index)
self.attrs[attr] = new_list
else:
self.attrs.pop(value, None)
else:
# Handle nested dict updates
self.attrs[attr].delete(".".join(key.split(".")[1:]))
elif action == "SET":
key, value = value.split("=", 1)
key = key.strip()
value = value.strip()
# check whether key is a list
attr, list_index = attribute_is_list(key.split(".")[0])
# If value not exists, changes value to a default if needed, else its the same as it was
value = self._get_default(value)
if type(value) != DynamoType:
if value in expression_attribute_values:
dyn_value = DynamoType(expression_attribute_values[value])
else:
dyn_value = DynamoType({"S": value})
else:
dyn_value = value
if "." in key and attr not in self.attrs:
raise ValueError # Setting nested attr not allowed if first attr does not exist yet
elif attr not in self.attrs:
self.attrs[attr] = dyn_value # set new top-level attribute
else:
self.attrs[attr].set(
".".join(key.split(".")[1:]), dyn_value, list_index
) # set value recursively
elif action == "ADD":
key, value = value.split(" ", 1)
key = key.strip()
value_str = value.strip()
if value_str in expression_attribute_values:
dyn_value = DynamoType(expression_attribute_values[value])
else:
raise TypeError
# Handle adding numbers - value gets added to existing value,
# or added to 0 if it doesn't exist yet
if dyn_value.is_number():
existing = self.attrs.get(key, DynamoType({"N": "0"}))
if not existing.same_type(dyn_value):
raise TypeError()
self.attrs[key] = DynamoType(
{
"N": str(
decimal.Decimal(existing.value)
+ decimal.Decimal(dyn_value.value)
)
}
)
# Handle adding sets - value is added to the set, or set is
# created with only this value if it doesn't exist yet
# New value must be of same set type as previous value
elif dyn_value.is_set():
existing = self.attrs.get(key, DynamoType({dyn_value.type: {}}))
if not existing.same_type(dyn_value):
raise TypeError()
new_set = set(existing.value).union(dyn_value.value)
self.attrs[key] = DynamoType({existing.type: list(new_set)})
else: # Number and Sets are the only supported types for ADD
raise TypeError
elif action == "DELETE":
key, value = value.split(" ", 1)
key = key.strip()
value_str = value.strip()
if value_str in expression_attribute_values:
dyn_value = DynamoType(expression_attribute_values[value])
else:
raise TypeError
if not dyn_value.is_set():
raise TypeError
existing = self.attrs.get(key, None)
if existing:
if not existing.same_type(dyn_value):
raise TypeError
new_set = set(existing.value).difference(dyn_value.value)
self.attrs[key] = DynamoType({existing.type: list(new_set)})
else:
raise NotImplementedError(
"{} update action not yet supported".format(action)
)
|
https://github.com/spulec/moto/issues/847
|
Traceback (most recent call last):
File "poc.py", line 35, in <module>
update_does_not_work()
File "/usr/local/lib/python2.7/site-packages/moto/core/models.py", line 71, in wrapper
result = func(*args, **kwargs)
File "poc.py", line 30, in update_does_not_work
':map': {'M': {'EntryKey': {'SS': ['thing1', 'thing2']}}}
File "/usr/local/lib/python2.7/site-packages/botocore/client.py", line 253, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/usr/local/lib/python2.7/site-packages/botocore/client.py", line 530, in _make_api_call
operation_model, request_dict)
File "/usr/local/lib/python2.7/site-packages/botocore/endpoint.py", line 141, in make_request
return self._send_request(request_dict, operation_model)
File "/usr/local/lib/python2.7/site-packages/botocore/endpoint.py", line 170, in _send_request
success_response, exception):
File "/usr/local/lib/python2.7/site-packages/botocore/endpoint.py", line 249, in _needs_retry
caught_exception=caught_exception, request_dict=request_dict)
File "/usr/local/lib/python2.7/site-packages/botocore/hooks.py", line 227, in emit
return self._emit(event_name, kwargs)
File "/usr/local/lib/python2.7/site-packages/botocore/hooks.py", line 210, in _emit
response = handler(**kwargs)
File "/usr/local/lib/python2.7/site-packages/botocore/retryhandler.py", line 183, in __call__
if self._checker(attempts, response, caught_exception):
File "/usr/local/lib/python2.7/site-packages/botocore/retryhandler.py", line 251, in __call__
caught_exception)
File "/usr/local/lib/python2.7/site-packages/botocore/retryhandler.py", line 269, in _should_retry
return self._checker(attempt_number, response, caught_exception)
File "/usr/local/lib/python2.7/site-packages/botocore/retryhandler.py", line 317, in __call__
caught_exception)
File "/usr/local/lib/python2.7/site-packages/botocore/retryhandler.py", line 223, in __call__
attempt_number, caught_exception)
File "/usr/local/lib/python2.7/site-packages/botocore/retryhandler.py", line 359, in _check_caught_exception
raise caught_exception
ValueError: too many values to unpack
|
ValueError
|
def __init__(self, job_id, tier, arn, archive_id):
self.job_id = job_id
self.tier = tier
self.arn = arn
self.archive_id = archive_id
Job.__init__(self, tier)
|
def __init__(self, job_id, archive_id):
self.job_id = job_id
self.archive_id = archive_id
|
https://github.com/spulec/moto/issues/1113
|
127.0.0.1 - - [06/Sep/2017 14:40:32] "POST /kefo/vaults/testvault/archives HTTP/1.1" 500 -
Error on request:
Traceback (most recent call last):
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/serving.py", line 209, in run_wsgi
execute(self.server.app)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/serving.py", line 197, in execute
application_iter = app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/server.py", line 92, in __call__
return backend_app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1997, in __call__
return self.wsgi_app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1985, in wsgi_app
response = self.handle_exception(e)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1540, in handle_exception
reraise(exc_type, exc_value, tb)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1982, in wsgi_app
response = self.full_dispatch_request()
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1614, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1517, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/core/utils.py", line 131, in __call__
result = self.callback(request, request.url, {})
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/glacier/responses.py", line 71, in vault_archive_response
return response_instance._vault_archive_response(request, full_url, headers)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/glacier/responses.py", line 75, in _vault_archive_response
body = request.body
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: 'Request' object has no attribute 'body'
|
AttributeError
|
def to_dict(self):
d = {
"Action": "ArchiveRetrieval",
"ArchiveId": self.archive_id,
"ArchiveSizeInBytes": 0,
"ArchiveSHA256TreeHash": None,
"Completed": False,
"CreationDate": self.st.strftime("%Y-%m-%dT%H:%M:%S.000Z"),
"InventorySizeInBytes": 0,
"JobDescription": None,
"JobId": self.job_id,
"RetrievalByteRange": None,
"SHA256TreeHash": None,
"SNSTopic": None,
"StatusCode": "InProgress",
"StatusMessage": None,
"VaultARN": self.arn,
"Tier": self.tier,
}
if datetime.datetime.now() > self.et:
d["Completed"] = True
d["CompletionDate"] = self.et.strftime("%Y-%m-%dT%H:%M:%S.000Z")
d["InventorySizeInBytes"] = 10000
d["StatusCode"] = "Succeeded"
return d
|
def to_dict(self):
return {
"Action": "InventoryRetrieval",
"ArchiveId": self.archive_id,
"ArchiveSizeInBytes": 0,
"ArchiveSHA256TreeHash": None,
"Completed": True,
"CompletionDate": "2013-03-20T17:03:43.221Z",
"CreationDate": "2013-03-20T17:03:43.221Z",
"InventorySizeInBytes": "0",
"JobDescription": None,
"JobId": self.job_id,
"RetrievalByteRange": None,
"SHA256TreeHash": None,
"SNSTopic": None,
"StatusCode": "Succeeded",
"StatusMessage": None,
"VaultARN": None,
}
|
https://github.com/spulec/moto/issues/1113
|
127.0.0.1 - - [06/Sep/2017 14:40:32] "POST /kefo/vaults/testvault/archives HTTP/1.1" 500 -
Error on request:
Traceback (most recent call last):
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/serving.py", line 209, in run_wsgi
execute(self.server.app)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/serving.py", line 197, in execute
application_iter = app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/server.py", line 92, in __call__
return backend_app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1997, in __call__
return self.wsgi_app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1985, in wsgi_app
response = self.handle_exception(e)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1540, in handle_exception
reraise(exc_type, exc_value, tb)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1982, in wsgi_app
response = self.full_dispatch_request()
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1614, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1517, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/core/utils.py", line 131, in __call__
result = self.callback(request, request.url, {})
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/glacier/responses.py", line 71, in vault_archive_response
return response_instance._vault_archive_response(request, full_url, headers)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/glacier/responses.py", line 75, in _vault_archive_response
body = request.body
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: 'Request' object has no attribute 'body'
|
AttributeError
|
def __init__(self, vault_name, region):
self.st = datetime.datetime.now()
self.vault_name = vault_name
self.region = region
self.archives = {}
self.jobs = {}
|
def __init__(self, vault_name, region):
self.vault_name = vault_name
self.region = region
self.archives = {}
self.jobs = {}
|
https://github.com/spulec/moto/issues/1113
|
127.0.0.1 - - [06/Sep/2017 14:40:32] "POST /kefo/vaults/testvault/archives HTTP/1.1" 500 -
Error on request:
Traceback (most recent call last):
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/serving.py", line 209, in run_wsgi
execute(self.server.app)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/serving.py", line 197, in execute
application_iter = app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/server.py", line 92, in __call__
return backend_app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1997, in __call__
return self.wsgi_app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1985, in wsgi_app
response = self.handle_exception(e)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1540, in handle_exception
reraise(exc_type, exc_value, tb)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1982, in wsgi_app
response = self.full_dispatch_request()
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1614, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1517, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/core/utils.py", line 131, in __call__
result = self.callback(request, request.url, {})
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/glacier/responses.py", line 71, in vault_archive_response
return response_instance._vault_archive_response(request, full_url, headers)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/glacier/responses.py", line 75, in _vault_archive_response
body = request.body
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: 'Request' object has no attribute 'body'
|
AttributeError
|
def to_dict(self):
archives_size = 0
for k in self.archives:
archives_size += self.archives[k]["size"]
d = {
"CreationDate": self.st.strftime("%Y-%m-%dT%H:%M:%S.000Z"),
"LastInventoryDate": self.st.strftime("%Y-%m-%dT%H:%M:%S.000Z"),
"NumberOfArchives": len(self.archives),
"SizeInBytes": archives_size,
"VaultARN": self.arn,
"VaultName": self.vault_name,
}
return d
|
def to_dict(self):
return {
"CreationDate": "2013-03-20T17:03:43.221Z",
"LastInventoryDate": "2013-03-20T17:03:43.221Z",
"NumberOfArchives": None,
"SizeInBytes": None,
"VaultARN": self.arn,
"VaultName": self.vault_name,
}
|
https://github.com/spulec/moto/issues/1113
|
127.0.0.1 - - [06/Sep/2017 14:40:32] "POST /kefo/vaults/testvault/archives HTTP/1.1" 500 -
Error on request:
Traceback (most recent call last):
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/serving.py", line 209, in run_wsgi
execute(self.server.app)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/serving.py", line 197, in execute
application_iter = app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/server.py", line 92, in __call__
return backend_app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1997, in __call__
return self.wsgi_app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1985, in wsgi_app
response = self.handle_exception(e)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1540, in handle_exception
reraise(exc_type, exc_value, tb)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1982, in wsgi_app
response = self.full_dispatch_request()
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1614, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1517, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/core/utils.py", line 131, in __call__
result = self.callback(request, request.url, {})
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/glacier/responses.py", line 71, in vault_archive_response
return response_instance._vault_archive_response(request, full_url, headers)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/glacier/responses.py", line 75, in _vault_archive_response
body = request.body
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: 'Request' object has no attribute 'body'
|
AttributeError
|
def create_archive(self, body, description):
archive_id = hashlib.md5(body).hexdigest()
self.archives[archive_id] = {}
self.archives[archive_id]["body"] = body
self.archives[archive_id]["size"] = len(body)
self.archives[archive_id]["sha256"] = hashlib.sha256(body).hexdigest()
self.archives[archive_id]["creation_date"] = datetime.datetime.now().strftime(
"%Y-%m-%dT%H:%M:%S.000Z"
)
self.archives[archive_id]["description"] = description
return archive_id
|
def create_archive(self, body):
archive_id = hashlib.sha256(body).hexdigest()
self.archives[archive_id] = body
return archive_id
|
https://github.com/spulec/moto/issues/1113
|
127.0.0.1 - - [06/Sep/2017 14:40:32] "POST /kefo/vaults/testvault/archives HTTP/1.1" 500 -
Error on request:
Traceback (most recent call last):
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/serving.py", line 209, in run_wsgi
execute(self.server.app)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/serving.py", line 197, in execute
application_iter = app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/server.py", line 92, in __call__
return backend_app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1997, in __call__
return self.wsgi_app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1985, in wsgi_app
response = self.handle_exception(e)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1540, in handle_exception
reraise(exc_type, exc_value, tb)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1982, in wsgi_app
response = self.full_dispatch_request()
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1614, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1517, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/core/utils.py", line 131, in __call__
result = self.callback(request, request.url, {})
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/glacier/responses.py", line 71, in vault_archive_response
return response_instance._vault_archive_response(request, full_url, headers)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/glacier/responses.py", line 75, in _vault_archive_response
body = request.body
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: 'Request' object has no attribute 'body'
|
AttributeError
|
def get_archive_body(self, archive_id):
return self.archives[archive_id]["body"]
|
def get_archive_body(self, archive_id):
return self.archives[archive_id]
|
https://github.com/spulec/moto/issues/1113
|
127.0.0.1 - - [06/Sep/2017 14:40:32] "POST /kefo/vaults/testvault/archives HTTP/1.1" 500 -
Error on request:
Traceback (most recent call last):
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/serving.py", line 209, in run_wsgi
execute(self.server.app)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/serving.py", line 197, in execute
application_iter = app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/server.py", line 92, in __call__
return backend_app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1997, in __call__
return self.wsgi_app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1985, in wsgi_app
response = self.handle_exception(e)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1540, in handle_exception
reraise(exc_type, exc_value, tb)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1982, in wsgi_app
response = self.full_dispatch_request()
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1614, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1517, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/core/utils.py", line 131, in __call__
result = self.callback(request, request.url, {})
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/glacier/responses.py", line 71, in vault_archive_response
return response_instance._vault_archive_response(request, full_url, headers)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/glacier/responses.py", line 75, in _vault_archive_response
body = request.body
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: 'Request' object has no attribute 'body'
|
AttributeError
|
def initiate_job(self, job_type, tier, archive_id):
job_id = get_job_id()
if job_type == "inventory-retrieval":
job = InventoryJob(job_id, tier, self.arn)
elif job_type == "archive-retrieval":
job = ArchiveJob(job_id, tier, self.arn, archive_id)
self.jobs[job_id] = job
return job_id
|
def initiate_job(self, archive_id):
job_id = get_job_id()
job = ArchiveJob(job_id, archive_id)
self.jobs[job_id] = job
return job_id
|
https://github.com/spulec/moto/issues/1113
|
127.0.0.1 - - [06/Sep/2017 14:40:32] "POST /kefo/vaults/testvault/archives HTTP/1.1" 500 -
Error on request:
Traceback (most recent call last):
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/serving.py", line 209, in run_wsgi
execute(self.server.app)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/serving.py", line 197, in execute
application_iter = app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/server.py", line 92, in __call__
return backend_app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1997, in __call__
return self.wsgi_app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1985, in wsgi_app
response = self.handle_exception(e)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1540, in handle_exception
reraise(exc_type, exc_value, tb)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1982, in wsgi_app
response = self.full_dispatch_request()
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1614, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1517, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/core/utils.py", line 131, in __call__
result = self.callback(request, request.url, {})
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/glacier/responses.py", line 71, in vault_archive_response
return response_instance._vault_archive_response(request, full_url, headers)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/glacier/responses.py", line 75, in _vault_archive_response
body = request.body
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: 'Request' object has no attribute 'body'
|
AttributeError
|
def get_job_output(self, job_id):
job = self.describe_job(job_id)
jobj = job.to_dict()
if jobj["Action"] == "InventoryRetrieval":
archives = self.get_archive_list()
return {
"VaultARN": self.arn,
"InventoryDate": jobj["CompletionDate"],
"ArchiveList": archives,
}
else:
archive_body = self.get_archive_body(job.archive_id)
return archive_body
|
def get_job_output(self, job_id):
job = self.describe_job(job_id)
archive_body = self.get_archive_body(job.archive_id)
return archive_body
|
https://github.com/spulec/moto/issues/1113
|
127.0.0.1 - - [06/Sep/2017 14:40:32] "POST /kefo/vaults/testvault/archives HTTP/1.1" 500 -
Error on request:
Traceback (most recent call last):
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/serving.py", line 209, in run_wsgi
execute(self.server.app)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/serving.py", line 197, in execute
application_iter = app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/server.py", line 92, in __call__
return backend_app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1997, in __call__
return self.wsgi_app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1985, in wsgi_app
response = self.handle_exception(e)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1540, in handle_exception
reraise(exc_type, exc_value, tb)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1982, in wsgi_app
response = self.full_dispatch_request()
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1614, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1517, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/core/utils.py", line 131, in __call__
result = self.callback(request, request.url, {})
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/glacier/responses.py", line 71, in vault_archive_response
return response_instance._vault_archive_response(request, full_url, headers)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/glacier/responses.py", line 75, in _vault_archive_response
body = request.body
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: 'Request' object has no attribute 'body'
|
AttributeError
|
def initiate_job(self, vault_name, job_type, tier, archive_id):
vault = self.get_vault(vault_name)
job_id = vault.initiate_job(job_type, tier, archive_id)
return job_id
|
def initiate_job(self, vault_name, archive_id):
vault = self.get_vault(vault_name)
job_id = vault.initiate_job(archive_id)
return job_id
|
https://github.com/spulec/moto/issues/1113
|
127.0.0.1 - - [06/Sep/2017 14:40:32] "POST /kefo/vaults/testvault/archives HTTP/1.1" 500 -
Error on request:
Traceback (most recent call last):
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/serving.py", line 209, in run_wsgi
execute(self.server.app)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/serving.py", line 197, in execute
application_iter = app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/server.py", line 92, in __call__
return backend_app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1997, in __call__
return self.wsgi_app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1985, in wsgi_app
response = self.handle_exception(e)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1540, in handle_exception
reraise(exc_type, exc_value, tb)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1982, in wsgi_app
response = self.full_dispatch_request()
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1614, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1517, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/core/utils.py", line 131, in __call__
result = self.callback(request, request.url, {})
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/glacier/responses.py", line 71, in vault_archive_response
return response_instance._vault_archive_response(request, full_url, headers)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/glacier/responses.py", line 75, in _vault_archive_response
body = request.body
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: 'Request' object has no attribute 'body'
|
AttributeError
|
def _vault_archive_response(self, request, full_url, headers):
method = request.method
if hasattr(request, "body"):
body = request.body
else:
body = request.data
description = ""
if "x-amz-archive-description" in request.headers:
description = request.headers["x-amz-archive-description"]
parsed_url = urlparse(full_url)
querystring = parse_qs(parsed_url.query, keep_blank_values=True)
vault_name = full_url.split("/")[-2]
if method == "POST":
return self._vault_archive_response_post(
vault_name, body, description, querystring, headers
)
else:
return 400, headers, "400 Bad Request"
|
def _vault_archive_response(self, request, full_url, headers):
method = request.method
body = request.body
parsed_url = urlparse(full_url)
querystring = parse_qs(parsed_url.query, keep_blank_values=True)
vault_name = full_url.split("/")[-2]
if method == "POST":
return self._vault_archive_response_post(vault_name, body, querystring, headers)
|
https://github.com/spulec/moto/issues/1113
|
127.0.0.1 - - [06/Sep/2017 14:40:32] "POST /kefo/vaults/testvault/archives HTTP/1.1" 500 -
Error on request:
Traceback (most recent call last):
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/serving.py", line 209, in run_wsgi
execute(self.server.app)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/serving.py", line 197, in execute
application_iter = app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/server.py", line 92, in __call__
return backend_app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1997, in __call__
return self.wsgi_app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1985, in wsgi_app
response = self.handle_exception(e)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1540, in handle_exception
reraise(exc_type, exc_value, tb)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1982, in wsgi_app
response = self.full_dispatch_request()
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1614, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1517, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/core/utils.py", line 131, in __call__
result = self.callback(request, request.url, {})
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/glacier/responses.py", line 71, in vault_archive_response
return response_instance._vault_archive_response(request, full_url, headers)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/glacier/responses.py", line 75, in _vault_archive_response
body = request.body
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: 'Request' object has no attribute 'body'
|
AttributeError
|
def _vault_archive_response_post(
self, vault_name, body, description, querystring, headers
):
vault = self.backend.get_vault(vault_name)
vault_id = vault.create_archive(body, description)
headers["x-amz-archive-id"] = vault_id
return 201, headers, ""
|
def _vault_archive_response_post(self, vault_name, body, querystring, headers):
vault = self.backend.get_vault(vault_name)
vault_id = vault.create_archive(body)
headers["x-amz-archive-id"] = vault_id
return 201, headers, ""
|
https://github.com/spulec/moto/issues/1113
|
127.0.0.1 - - [06/Sep/2017 14:40:32] "POST /kefo/vaults/testvault/archives HTTP/1.1" 500 -
Error on request:
Traceback (most recent call last):
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/serving.py", line 209, in run_wsgi
execute(self.server.app)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/serving.py", line 197, in execute
application_iter = app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/server.py", line 92, in __call__
return backend_app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1997, in __call__
return self.wsgi_app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1985, in wsgi_app
response = self.handle_exception(e)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1540, in handle_exception
reraise(exc_type, exc_value, tb)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1982, in wsgi_app
response = self.full_dispatch_request()
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1614, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1517, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/core/utils.py", line 131, in __call__
result = self.callback(request, request.url, {})
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/glacier/responses.py", line 71, in vault_archive_response
return response_instance._vault_archive_response(request, full_url, headers)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/glacier/responses.py", line 75, in _vault_archive_response
body = request.body
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: 'Request' object has no attribute 'body'
|
AttributeError
|
def _vault_jobs_response(self, request, full_url, headers):
method = request.method
if hasattr(request, "body"):
body = request.body
else:
body = request.data
account_id = full_url.split("/")[1]
vault_name = full_url.split("/")[-2]
if method == "GET":
jobs = self.backend.list_jobs(vault_name)
headers["content-type"] = "application/json"
return (
200,
headers,
json.dumps(
{
"JobList": [job.to_dict() for job in jobs],
"Marker": None,
}
),
)
elif method == "POST":
json_body = json.loads(body.decode("utf-8"))
job_type = json_body["Type"]
archive_id = None
if "ArchiveId" in json_body:
archive_id = json_body["ArchiveId"]
if "Tier" in json_body:
tier = json_body["Tier"]
else:
tier = "Standard"
job_id = self.backend.initiate_job(vault_name, job_type, tier, archive_id)
headers["x-amz-job-id"] = job_id
headers["Location"] = "/{0}/vaults/{1}/jobs/{2}".format(
account_id, vault_name, job_id
)
return 202, headers, ""
|
def _vault_jobs_response(self, request, full_url, headers):
method = request.method
body = request.body
account_id = full_url.split("/")[1]
vault_name = full_url.split("/")[-2]
if method == "GET":
jobs = self.backend.list_jobs(vault_name)
headers["content-type"] = "application/json"
return (
200,
headers,
json.dumps(
{
"JobList": [job.to_dict() for job in jobs],
"Marker": None,
}
),
)
elif method == "POST":
json_body = json.loads(body.decode("utf-8"))
archive_id = json_body["ArchiveId"]
job_id = self.backend.initiate_job(vault_name, archive_id)
headers["x-amz-job-id"] = job_id
headers["Location"] = "/{0}/vaults/{1}/jobs/{2}".format(
account_id, vault_name, job_id
)
return 202, headers, ""
|
https://github.com/spulec/moto/issues/1113
|
127.0.0.1 - - [06/Sep/2017 14:40:32] "POST /kefo/vaults/testvault/archives HTTP/1.1" 500 -
Error on request:
Traceback (most recent call last):
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/serving.py", line 209, in run_wsgi
execute(self.server.app)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/serving.py", line 197, in execute
application_iter = app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/server.py", line 92, in __call__
return backend_app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1997, in __call__
return self.wsgi_app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1985, in wsgi_app
response = self.handle_exception(e)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1540, in handle_exception
reraise(exc_type, exc_value, tb)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1982, in wsgi_app
response = self.full_dispatch_request()
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1614, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1517, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/core/utils.py", line 131, in __call__
result = self.callback(request, request.url, {})
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/glacier/responses.py", line 71, in vault_archive_response
return response_instance._vault_archive_response(request, full_url, headers)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/glacier/responses.py", line 75, in _vault_archive_response
body = request.body
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: 'Request' object has no attribute 'body'
|
AttributeError
|
def _vault_jobs_output_response(self, request, full_url, headers):
vault_name = full_url.split("/")[-4]
job_id = full_url.split("/")[-2]
vault = self.backend.get_vault(vault_name)
if vault.job_ready(job_id):
output = vault.get_job_output(job_id)
if isinstance(output, dict):
headers["content-type"] = "application/json"
return 200, headers, json.dumps(output)
else:
headers["content-type"] = "application/octet-stream"
return 200, headers, output
else:
return 404, headers, "404 Not Found"
|
def _vault_jobs_output_response(self, request, full_url, headers):
vault_name = full_url.split("/")[-4]
job_id = full_url.split("/")[-2]
vault = self.backend.get_vault(vault_name)
output = vault.get_job_output(job_id)
headers["content-type"] = "application/octet-stream"
return 200, headers, output
|
https://github.com/spulec/moto/issues/1113
|
127.0.0.1 - - [06/Sep/2017 14:40:32] "POST /kefo/vaults/testvault/archives HTTP/1.1" 500 -
Error on request:
Traceback (most recent call last):
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/serving.py", line 209, in run_wsgi
execute(self.server.app)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/serving.py", line 197, in execute
application_iter = app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/server.py", line 92, in __call__
return backend_app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1997, in __call__
return self.wsgi_app(environ, start_response)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1985, in wsgi_app
response = self.handle_exception(e)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1540, in handle_exception
reraise(exc_type, exc_value, tb)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1982, in wsgi_app
response = self.full_dispatch_request()
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1614, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1517, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/core/utils.py", line 131, in __call__
result = self.callback(request, request.url, {})
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/glacier/responses.py", line 71, in vault_archive_response
return response_instance._vault_archive_response(request, full_url, headers)
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/moto/glacier/responses.py", line 75, in _vault_archive_response
body = request.body
File "/Users/kford1/Work/motoenv/lib/python3.5/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: 'Request' object has no attribute 'body'
|
AttributeError
|
def get_s3_bucket_list(boto3_session):
client = boto3_session.client("s3")
# NOTE no paginator available for this operation
buckets = client.list_buckets()
for bucket in buckets["Buckets"]:
bucket["Region"] = client.get_bucket_location(Bucket=bucket["Name"])[
"LocationConstraint"
]
return buckets
|
def get_s3_bucket_list(boto3_session):
client = boto3_session.client("s3")
# NOTE no paginator available for this operation
return client.list_buckets()
|
https://github.com/lyft/cartography/issues/231
|
ERROR:cartography.sync:Unhandled exception during sync stage 'aws'
Traceback (most recent call last):
File "/usr/local/bin/cartography", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.6/site-packages/cartography/cli.py", line 216, in main
return CLI(default_sync, prog='cartography').main(argv)
File "/usr/local/lib/python3.6/site-packages/cartography/cli.py", line 196, in main
return cartography.sync.run_with_config(self.sync, config)
File "/usr/local/lib/python3.6/site-packages/cartography/sync.py", line 135, in run_with_config
return sync.run(neo4j_driver, config)
File "/usr/local/lib/python3.6/site-packages/cartography/sync.py", line 69, in run
stage_func(neo4j_session, config)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 106, in start_aws_ingestion
_sync_multiple_accounts(neo4j_session, aws_accounts, regions, config.update_tag, common_job_parameters)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 44, in _sync_multiple_accounts
_sync_one_account(neo4j_session, boto3_session, account_id, regions, sync_tag, common_job_parameters)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 22, in _sync_one_account
s3.sync(neo4j_session, boto3_session, account_id, sync_tag, common_job_parameters)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/s3.py", line 326, in sync
load_s3_details(neo4j_session, acl_and_policy_data_iter, current_aws_account_id, aws_update_tag)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/s3.py", line 136, in load_s3_details
for bucket, acl, policy in s3_details_iter:
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/s3.py", line 26, in get_s3_bucket_details
acl = get_acl(bucket, client)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/s3.py", line 54, in get_acl
acl = client.get_bucket_acl(Bucket=bucket['Name'])
File "/Users/drosenbloom/Library/Python/3.6/lib/python/site-packages/botocore/client.py", line 272, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/Users/drosenbloom/Library/Python/3.6/lib/python/site-packages/botocore/client.py", line 576, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (IllegalLocationConstraintException) when calling the GetBucketAcl operation: The me-south-1 location constraint is incompatible for the region specific endpoint this request was sent to.
|
botocore.exceptions.ClientError
|
def get_s3_bucket_details(boto3_session, bucket_data):
"""
Iterates over all S3 buckets. Yields bucket name (string) and pairs of S3 bucket policies (JSON) and ACLs (JSON)
"""
# a local store for s3 clients so that we may re-use clients for an AWS region
s3_regional_clients = {}
for bucket in bucket_data["Buckets"]:
# Use us-east-1 region if no region was recognized for buckets
# It was found that client.get_bucket_location does not return a region for buckets
# in us-east-1 region
client = s3_regional_clients.get(bucket["Region"])
if not client:
client = boto3_session.client("s3", bucket["Region"])
s3_regional_clients[bucket["Region"]] = client
acl = get_acl(bucket, client)
policy = get_policy(bucket, client)
yield bucket["Name"], acl, policy
|
def get_s3_bucket_details(boto3_session, bucket_data):
"""
Iterates over all S3 buckets. Yields bucket name (string) and pairs of S3 bucket policies (JSON) and ACLs (JSON)
"""
client = boto3_session.client("s3")
for bucket in bucket_data["Buckets"]:
acl = get_acl(bucket, client)
policy = get_policy(bucket, client)
yield bucket["Name"], acl, policy
|
https://github.com/lyft/cartography/issues/231
|
ERROR:cartography.sync:Unhandled exception during sync stage 'aws'
Traceback (most recent call last):
File "/usr/local/bin/cartography", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.6/site-packages/cartography/cli.py", line 216, in main
return CLI(default_sync, prog='cartography').main(argv)
File "/usr/local/lib/python3.6/site-packages/cartography/cli.py", line 196, in main
return cartography.sync.run_with_config(self.sync, config)
File "/usr/local/lib/python3.6/site-packages/cartography/sync.py", line 135, in run_with_config
return sync.run(neo4j_driver, config)
File "/usr/local/lib/python3.6/site-packages/cartography/sync.py", line 69, in run
stage_func(neo4j_session, config)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 106, in start_aws_ingestion
_sync_multiple_accounts(neo4j_session, aws_accounts, regions, config.update_tag, common_job_parameters)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 44, in _sync_multiple_accounts
_sync_one_account(neo4j_session, boto3_session, account_id, regions, sync_tag, common_job_parameters)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 22, in _sync_one_account
s3.sync(neo4j_session, boto3_session, account_id, sync_tag, common_job_parameters)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/s3.py", line 326, in sync
load_s3_details(neo4j_session, acl_and_policy_data_iter, current_aws_account_id, aws_update_tag)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/s3.py", line 136, in load_s3_details
for bucket, acl, policy in s3_details_iter:
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/s3.py", line 26, in get_s3_bucket_details
acl = get_acl(bucket, client)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/s3.py", line 54, in get_acl
acl = client.get_bucket_acl(Bucket=bucket['Name'])
File "/Users/drosenbloom/Library/Python/3.6/lib/python/site-packages/botocore/client.py", line 272, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/Users/drosenbloom/Library/Python/3.6/lib/python/site-packages/botocore/client.py", line 576, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (IllegalLocationConstraintException) when calling the GetBucketAcl operation: The me-south-1 location constraint is incompatible for the region specific endpoint this request was sent to.
|
botocore.exceptions.ClientError
|
def get_policy(bucket, client):
"""
Gets the S3 bucket policy. Returns policy string or None if no policy
"""
try:
policy = client.get_bucket_policy(Bucket=bucket["Name"])
except ClientError as e:
# no policy is defined for this bucket
if "NoSuchBucketPolicy" in e.args[0]:
policy = None
elif "AccessDenied" in e.args[0]:
logger.warning(
"Access denied trying to retrieve S3 bucket {} policy".format(
bucket["Name"]
)
)
policy = None
elif "NoSuchBucket" in e.args[0]:
logger.warning(
"get_bucket_policy({}) threw NoSuchBucket exception, skipping".format(
bucket["Name"]
)
)
policy = None
elif "AllAccessDisabled" in e.args[0]:
# Catches the following error : "An error occurred (AllAccessDisabled) when calling the
# GetBucketAcl operation: All access to this object has been disabled"
logger.warning(
"Failed to retrieve S3 bucket {} policies - Bucket is disabled".format(
bucket["Name"]
)
)
policy = None
else:
raise
return policy
|
def get_policy(bucket, client):
"""
Gets the S3 bucket policy. Returns policy string or None if no policy
"""
try:
policy = client.get_bucket_policy(Bucket=bucket["Name"])
except ClientError as e:
# no policy is defined for this bucket
if "NoSuchBucketPolicy" in e.args[0]:
policy = None
elif "AccessDenied" in e.args[0]:
logger.warning(
"Access denied trying to retrieve S3 bucket {} policy".format(
bucket["Name"]
)
)
policy = None
elif "NoSuchBucket" in e.args[0]:
logger.warning(
"get_bucket_policy({}) threw NoSuchBucket exception, skipping".format(
bucket["Name"]
)
)
policy = None
else:
raise
return policy
|
https://github.com/lyft/cartography/issues/231
|
ERROR:cartography.sync:Unhandled exception during sync stage 'aws'
Traceback (most recent call last):
File "/usr/local/bin/cartography", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.6/site-packages/cartography/cli.py", line 216, in main
return CLI(default_sync, prog='cartography').main(argv)
File "/usr/local/lib/python3.6/site-packages/cartography/cli.py", line 196, in main
return cartography.sync.run_with_config(self.sync, config)
File "/usr/local/lib/python3.6/site-packages/cartography/sync.py", line 135, in run_with_config
return sync.run(neo4j_driver, config)
File "/usr/local/lib/python3.6/site-packages/cartography/sync.py", line 69, in run
stage_func(neo4j_session, config)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 106, in start_aws_ingestion
_sync_multiple_accounts(neo4j_session, aws_accounts, regions, config.update_tag, common_job_parameters)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 44, in _sync_multiple_accounts
_sync_one_account(neo4j_session, boto3_session, account_id, regions, sync_tag, common_job_parameters)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 22, in _sync_one_account
s3.sync(neo4j_session, boto3_session, account_id, sync_tag, common_job_parameters)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/s3.py", line 326, in sync
load_s3_details(neo4j_session, acl_and_policy_data_iter, current_aws_account_id, aws_update_tag)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/s3.py", line 136, in load_s3_details
for bucket, acl, policy in s3_details_iter:
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/s3.py", line 26, in get_s3_bucket_details
acl = get_acl(bucket, client)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/s3.py", line 54, in get_acl
acl = client.get_bucket_acl(Bucket=bucket['Name'])
File "/Users/drosenbloom/Library/Python/3.6/lib/python/site-packages/botocore/client.py", line 272, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/Users/drosenbloom/Library/Python/3.6/lib/python/site-packages/botocore/client.py", line 576, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (IllegalLocationConstraintException) when calling the GetBucketAcl operation: The me-south-1 location constraint is incompatible for the region specific endpoint this request was sent to.
|
botocore.exceptions.ClientError
|
def get_acl(bucket, client):
"""
Gets the S3 bucket ACL. Returns ACL string
"""
try:
acl = client.get_bucket_acl(Bucket=bucket["Name"])
except ClientError as e:
if "AccessDenied" in e.args[0]:
logger.warning(
"Failed to retrieve S3 bucket {} ACL - Access Denied".format(
bucket["Name"]
)
)
return None
elif "NoSuchBucket" in e.args[0]:
logger.warning(
"Failed to retrieve S3 bucket {} ACL - No Such Bucket".format(
bucket["Name"]
)
)
return None
elif "AllAccessDisabled" in e.args[0]:
logger.warning(
"Failed to retrieve S3 bucket {} ACL - Bucket is disabled".format(
bucket["Name"]
)
)
return None
else:
raise
return acl
|
def get_acl(bucket, client):
"""
Gets the S3 bucket ACL. Returns ACL string
"""
try:
acl = client.get_bucket_acl(Bucket=bucket["Name"])
except ClientError as e:
if "AccessDenied" in e.args[0]:
logger.warning(
"Failed to retrieve S3 bucket {} ACL - Access Denied".format(
bucket["Name"]
)
)
return None
elif "NoSuchBucket" in e.args[0]:
logger.warning(
"Failed to retrieve S3 bucket {} ACL - No Such Bucket".format(
bucket["Name"]
)
)
return None
else:
raise
return acl
|
https://github.com/lyft/cartography/issues/231
|
ERROR:cartography.sync:Unhandled exception during sync stage 'aws'
Traceback (most recent call last):
File "/usr/local/bin/cartography", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.6/site-packages/cartography/cli.py", line 216, in main
return CLI(default_sync, prog='cartography').main(argv)
File "/usr/local/lib/python3.6/site-packages/cartography/cli.py", line 196, in main
return cartography.sync.run_with_config(self.sync, config)
File "/usr/local/lib/python3.6/site-packages/cartography/sync.py", line 135, in run_with_config
return sync.run(neo4j_driver, config)
File "/usr/local/lib/python3.6/site-packages/cartography/sync.py", line 69, in run
stage_func(neo4j_session, config)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 106, in start_aws_ingestion
_sync_multiple_accounts(neo4j_session, aws_accounts, regions, config.update_tag, common_job_parameters)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 44, in _sync_multiple_accounts
_sync_one_account(neo4j_session, boto3_session, account_id, regions, sync_tag, common_job_parameters)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 22, in _sync_one_account
s3.sync(neo4j_session, boto3_session, account_id, sync_tag, common_job_parameters)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/s3.py", line 326, in sync
load_s3_details(neo4j_session, acl_and_policy_data_iter, current_aws_account_id, aws_update_tag)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/s3.py", line 136, in load_s3_details
for bucket, acl, policy in s3_details_iter:
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/s3.py", line 26, in get_s3_bucket_details
acl = get_acl(bucket, client)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/s3.py", line 54, in get_acl
acl = client.get_bucket_acl(Bucket=bucket['Name'])
File "/Users/drosenbloom/Library/Python/3.6/lib/python/site-packages/botocore/client.py", line 272, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/Users/drosenbloom/Library/Python/3.6/lib/python/site-packages/botocore/client.py", line 576, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (IllegalLocationConstraintException) when calling the GetBucketAcl operation: The me-south-1 location constraint is incompatible for the region specific endpoint this request was sent to.
|
botocore.exceptions.ClientError
|
def load_s3_buckets(neo4j_session, data, current_aws_account_id, aws_update_tag):
ingest_bucket = """
MERGE (bucket:S3Bucket{id:{BucketName}})
ON CREATE SET bucket.firstseen = timestamp(), bucket.creationdate = {CreationDate}
SET bucket.name = {BucketName}, bucket.region = {BucketRegion}, bucket.arn = {Arn},
bucket.lastupdated = {aws_update_tag}
WITH bucket
MATCH (owner:AWSAccount{id: {AWS_ACCOUNT_ID}})
MERGE (owner)-[r:RESOURCE]->(bucket)
ON CREATE SET r.firstseen = timestamp()
SET r.lastupdated = {aws_update_tag}
"""
# The owner data returned by the API maps to the aws account nickname and not the IAM user
# there doesn't seem to be a way to retreive the mapping but we can get the current context account
# so we map to that directly
for bucket in data["Buckets"]:
arn = "arn:aws:s3:::" + bucket["Name"]
neo4j_session.run(
ingest_bucket,
BucketName=bucket["Name"],
BucketRegion=bucket["Region"],
Arn=arn,
CreationDate=str(bucket["CreationDate"]),
AWS_ACCOUNT_ID=current_aws_account_id,
aws_update_tag=aws_update_tag,
)
|
def load_s3_buckets(neo4j_session, data, current_aws_account_id, aws_update_tag):
ingest_bucket = """
MERGE (bucket:S3Bucket{id:{BucketName}})
ON CREATE SET bucket.firstseen = timestamp(), bucket.creationdate = {CreationDate}
SET bucket.name = {BucketName}, bucket.arn = {Arn}, bucket.lastupdated = {aws_update_tag}
WITH bucket
MATCH (owner:AWSAccount{id: {AWS_ACCOUNT_ID}})
MERGE (owner)-[r:RESOURCE]->(bucket)
ON CREATE SET r.firstseen = timestamp()
SET r.lastupdated = {aws_update_tag}
"""
# The owner data returned by the API maps to the aws account nickname and not the IAM user
# there doesn't seem to be a way to retreive the mapping but we can get the current context account
# so we map to that directly
for bucket in data["Buckets"]:
arn = "arn:aws:s3:::" + bucket["Name"]
neo4j_session.run(
ingest_bucket,
BucketName=bucket["Name"],
Arn=arn,
CreationDate=str(bucket["CreationDate"]),
AWS_ACCOUNT_ID=current_aws_account_id,
aws_update_tag=aws_update_tag,
)
|
https://github.com/lyft/cartography/issues/231
|
ERROR:cartography.sync:Unhandled exception during sync stage 'aws'
Traceback (most recent call last):
File "/usr/local/bin/cartography", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.6/site-packages/cartography/cli.py", line 216, in main
return CLI(default_sync, prog='cartography').main(argv)
File "/usr/local/lib/python3.6/site-packages/cartography/cli.py", line 196, in main
return cartography.sync.run_with_config(self.sync, config)
File "/usr/local/lib/python3.6/site-packages/cartography/sync.py", line 135, in run_with_config
return sync.run(neo4j_driver, config)
File "/usr/local/lib/python3.6/site-packages/cartography/sync.py", line 69, in run
stage_func(neo4j_session, config)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 106, in start_aws_ingestion
_sync_multiple_accounts(neo4j_session, aws_accounts, regions, config.update_tag, common_job_parameters)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 44, in _sync_multiple_accounts
_sync_one_account(neo4j_session, boto3_session, account_id, regions, sync_tag, common_job_parameters)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 22, in _sync_one_account
s3.sync(neo4j_session, boto3_session, account_id, sync_tag, common_job_parameters)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/s3.py", line 326, in sync
load_s3_details(neo4j_session, acl_and_policy_data_iter, current_aws_account_id, aws_update_tag)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/s3.py", line 136, in load_s3_details
for bucket, acl, policy in s3_details_iter:
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/s3.py", line 26, in get_s3_bucket_details
acl = get_acl(bucket, client)
File "/usr/local/lib/python3.6/site-packages/cartography/intel/aws/s3.py", line 54, in get_acl
acl = client.get_bucket_acl(Bucket=bucket['Name'])
File "/Users/drosenbloom/Library/Python/3.6/lib/python/site-packages/botocore/client.py", line 272, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/Users/drosenbloom/Library/Python/3.6/lib/python/site-packages/botocore/client.py", line 576, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (IllegalLocationConstraintException) when calling the GetBucketAcl operation: The me-south-1 location constraint is incompatible for the region specific endpoint this request was sent to.
|
botocore.exceptions.ClientError
|
def load_ecr_repositories(
neo4j_session, data, region, current_aws_account_id, aws_update_tag
):
query = """
MERGE (repo:ECRRepository{id: {RepositoryArn}})
ON CREATE SET repo.firstseen = timestamp(), repo.arn = {RepositoryArn}, repo.name = {RepositoryName},
repo.region = {Region}, repo.created_at = {CreatedAt}
SET repo.lastupdated = {aws_update_tag}, repo.uri = {RepositoryUri}
WITH repo
MATCH (owner:AWSAccount{id: {AWS_ACCOUNT_ID}})
MERGE (owner)-[r:RESOURCE]->(repo)
ON CREATE SET r.firstseen = timestamp()
SET r.lastupdated = {aws_update_tag}
"""
for repo in data:
neo4j_session.run(
query,
RepositoryArn=repo["repositoryArn"],
RepositoryName=repo["repositoryName"],
RepositoryUri=repo["repositoryUri"],
CreatedAt=str(repo["createdAt"]),
Region=region,
aws_update_tag=aws_update_tag,
AWS_ACCOUNT_ID=current_aws_account_id,
).consume() # See issue #440
|
def load_ecr_repositories(
neo4j_session, data, region, current_aws_account_id, aws_update_tag
):
query = """
MERGE (repo:ECRRepository{id: {RepositoryArn}})
ON CREATE SET repo.firstseen = timestamp(), repo.arn = {RepositoryArn}, repo.name = {RepositoryName},
repo.region = {Region}, repo.created_at = {CreatedAt}
SET repo.lastupdated = {aws_update_tag}, repo.uri = {RepositoryUri}
WITH repo
MATCH (owner:AWSAccount{id: {AWS_ACCOUNT_ID}})
MERGE (owner)-[r:RESOURCE]->(repo)
ON CREATE SET r.firstseen = timestamp()
SET r.lastupdated = {aws_update_tag}
"""
for repo in data:
neo4j_session.run(
query,
RepositoryArn=repo["repositoryArn"],
RepositoryName=repo["repositoryName"],
RepositoryUri=repo["repositoryUri"],
CreatedAt=str(repo["createdAt"]),
Region=region,
aws_update_tag=aws_update_tag,
AWS_ACCOUNT_ID=current_aws_account_id,
)
|
https://github.com/lyft/cartography/issues/440
|
Traceback (most recent call last):
File "{PATH}/lib/python3.6/site-packages/cartography/sync.py", line 71, in run
stage_func(neo4j_session, config)
File "{OTHER_PATH}/intelmodules/lyft/aws.py", line 234, in lyft_start_aws_ingestion
common_job_parameters
File "{PATH}/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 98, in _sync_multiple_accounts
_sync_one_account(neo4j_session, boto3_session, account_id, sync_tag, common_job_parameters)
File "{PATH}/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 46, in _sync_one_account
ecr.sync(neo4j_session, boto3_session, regions, account_id, sync_tag, common_job_parameters)
File "{PATH}/lib/python3.6/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{PATH}/lib/python3.6/site-packages/cartography/intel/aws/ecr.py", line 241, in sync
load_ecr_repository_images(neo4j_session, image_data, region, aws_update_tag)
File "{PATH}/lib/python3.6/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{PATH}/lib/python3.6/site-packages/cartography/intel/aws/ecr.py", line 171, in load_ecr_repository_images
Region=region,
File "{PATH}/lib/python3.6/site-packages/neo4j/__init__.py", line 502, in run
self._connection.send()
File "{PATH}/lib/python3.6/site-packages/neobolt/direct.py", line 394, in send
self._send()
File "{PATH}/lib/python3.6/site-packages/neobolt/direct.py", line 409, in _send
self.socket.sendall(data)
File "/usr/lib/python3.6/ssl.py", line 965, in sendall
v = self.send(data[count:])
File "/usr/lib/python3.6/ssl.py", line 935, in send
return self._sslobj.write(data)
File "/usr/lib/python3.6/ssl.py", line 636, in write
return self._sslobj.write(data)
ConnectionResetError: [Errno 104] Connection reset by peer
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "{PATH}/lib/python3.6/site-packages/neo4j/__init__.py", line 399, in close
self._connection.sync()
File "{PATH}/lib/python3.6/site-packages/neobolt/direct.py", line 505, in sync
self.send()
File "{PATH}/lib/python3.6/site-packages/neobolt/direct.py", line 394, in send
self._send()
File "{PATH}/lib/python3.6/site-packages/neobolt/direct.py", line 409, in _send
self.socket.sendall(data)
File "/usr/lib/python3.6/ssl.py", line 965, in sendall
v = self.send(data[count:])
File "/usr/lib/python3.6/ssl.py", line 935, in send
return self._sslobj.write(data)
File "/usr/lib/python3.6/ssl.py", line 636, in write
return self._sslobj.write(data)
BrokenPipeError: [Errno 32] Broken pipe
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "{OTHER_PATH}/intelmodules/syncgraph.py", line 220, in <module>
main(argv)
File "{OTHER_PATH}/intelmodules/syncgraph.py", line 210, in main
return cartography.sync.run_with_config(sync, config)
File "{PATH}/lib/python3.6/site-packages/cartography/sync.py", line 145, in run_with_config
return sync.run(neo4j_driver, config)
File "{PATH}/lib/python3.6/site-packages/cartography/sync.py", line 78, in run
logger.info("Finishing sync stage '%s'", stage_name)
File "{PATH}/lib/python3.6/site-packages/neo4j/__init__.py", line 374, in __exit__
self.close()
File "{PATH}/lib/python3.6/site-packages/neo4j/__init__.py", line 401, in close
ServiceUnavailable, SessionError):
NameError: name 'ServiceUnavailable' is not defined
|
ConnectionResetError
|
def load_ecr_repository_images(neo4j_session, data, region, aws_update_tag):
query = """
MERGE (repo_image:ECRRepositoryImage{id: {RepositoryImageUri}})
ON CREATE SET repo_image.firstseen = timestamp()
SET repo_image.lastupdated = {aws_update_tag}, repo_image.tag = {ImageTag},
repo_image.uri = {RepositoryImageUri}
WITH repo_image
MERGE (image:ECRImage{id: {ImageDigest}})
ON CREATE SET image.firstseen = timestamp(), image.digest = {ImageDigest}
SET image.lastupdated = {aws_update_tag},
image.region = {Region}
WITH repo_image, image
MERGE (repo_image)-[r1:IMAGE]->(image)
ON CREATE SET r1.firstseen = timestamp()
SET r1.lastupdated = {aws_update_tag}
WITH repo_image
MATCH (repo:ECRRepository{uri: {RepositoryUri}})
MERGE (repo)-[r2:REPO_IMAGE]->(repo_image)
ON CREATE SET r2.firstseen = timestamp()
SET r2.lastupdated = {aws_update_tag}
"""
for repo_uri, repo_images in data.items():
for repo_image in repo_images:
image_tag = repo_image.get("imageTag", "")
# TODO this assumes image tags and uris are immutable
repo_image_uri = f"{repo_uri}:{image_tag}" if image_tag else repo_uri
neo4j_session.run(
query,
RepositoryImageUri=repo_image_uri,
ImageDigest=repo_image["imageDigest"],
ImageTag=image_tag,
RepositoryUri=repo_uri,
aws_update_tag=aws_update_tag,
Region=region,
).consume() # See issue #440
|
def load_ecr_repository_images(neo4j_session, data, region, aws_update_tag):
query = """
MERGE (repo_image:ECRRepositoryImage{id: {RepositoryImageUri}})
ON CREATE SET repo_image.firstseen = timestamp()
SET repo_image.lastupdated = {aws_update_tag}, repo_image.tag = {ImageTag},
repo_image.uri = {RepositoryImageUri}
WITH repo_image
MERGE (image:ECRImage{id: {ImageDigest}})
ON CREATE SET image.firstseen = timestamp(), image.digest = {ImageDigest}
SET image.lastupdated = {aws_update_tag},
image.region = {Region}
WITH repo_image, image
MERGE (repo_image)-[r1:IMAGE]->(image)
ON CREATE SET r1.firstseen = timestamp()
SET r1.lastupdated = {aws_update_tag}
WITH repo_image
MATCH (repo:ECRRepository{uri: {RepositoryUri}})
MERGE (repo)-[r2:REPO_IMAGE]->(repo_image)
ON CREATE SET r2.firstseen = timestamp()
SET r2.lastupdated = {aws_update_tag}
"""
for repo_uri, repo_images in data.items():
for repo_image in repo_images:
image_tag = repo_image.get("imageTag", "")
# TODO this assumes image tags and uris are immutable
repo_image_uri = f"{repo_uri}:{image_tag}" if image_tag else repo_uri
neo4j_session.run(
query,
RepositoryImageUri=repo_image_uri,
ImageDigest=repo_image["imageDigest"],
ImageTag=image_tag,
RepositoryUri=repo_uri,
aws_update_tag=aws_update_tag,
Region=region,
)
|
https://github.com/lyft/cartography/issues/440
|
Traceback (most recent call last):
File "{PATH}/lib/python3.6/site-packages/cartography/sync.py", line 71, in run
stage_func(neo4j_session, config)
File "{OTHER_PATH}/intelmodules/lyft/aws.py", line 234, in lyft_start_aws_ingestion
common_job_parameters
File "{PATH}/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 98, in _sync_multiple_accounts
_sync_one_account(neo4j_session, boto3_session, account_id, sync_tag, common_job_parameters)
File "{PATH}/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 46, in _sync_one_account
ecr.sync(neo4j_session, boto3_session, regions, account_id, sync_tag, common_job_parameters)
File "{PATH}/lib/python3.6/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{PATH}/lib/python3.6/site-packages/cartography/intel/aws/ecr.py", line 241, in sync
load_ecr_repository_images(neo4j_session, image_data, region, aws_update_tag)
File "{PATH}/lib/python3.6/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{PATH}/lib/python3.6/site-packages/cartography/intel/aws/ecr.py", line 171, in load_ecr_repository_images
Region=region,
File "{PATH}/lib/python3.6/site-packages/neo4j/__init__.py", line 502, in run
self._connection.send()
File "{PATH}/lib/python3.6/site-packages/neobolt/direct.py", line 394, in send
self._send()
File "{PATH}/lib/python3.6/site-packages/neobolt/direct.py", line 409, in _send
self.socket.sendall(data)
File "/usr/lib/python3.6/ssl.py", line 965, in sendall
v = self.send(data[count:])
File "/usr/lib/python3.6/ssl.py", line 935, in send
return self._sslobj.write(data)
File "/usr/lib/python3.6/ssl.py", line 636, in write
return self._sslobj.write(data)
ConnectionResetError: [Errno 104] Connection reset by peer
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "{PATH}/lib/python3.6/site-packages/neo4j/__init__.py", line 399, in close
self._connection.sync()
File "{PATH}/lib/python3.6/site-packages/neobolt/direct.py", line 505, in sync
self.send()
File "{PATH}/lib/python3.6/site-packages/neobolt/direct.py", line 394, in send
self._send()
File "{PATH}/lib/python3.6/site-packages/neobolt/direct.py", line 409, in _send
self.socket.sendall(data)
File "/usr/lib/python3.6/ssl.py", line 965, in sendall
v = self.send(data[count:])
File "/usr/lib/python3.6/ssl.py", line 935, in send
return self._sslobj.write(data)
File "/usr/lib/python3.6/ssl.py", line 636, in write
return self._sslobj.write(data)
BrokenPipeError: [Errno 32] Broken pipe
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "{OTHER_PATH}/intelmodules/syncgraph.py", line 220, in <module>
main(argv)
File "{OTHER_PATH}/intelmodules/syncgraph.py", line 210, in main
return cartography.sync.run_with_config(sync, config)
File "{PATH}/lib/python3.6/site-packages/cartography/sync.py", line 145, in run_with_config
return sync.run(neo4j_driver, config)
File "{PATH}/lib/python3.6/site-packages/cartography/sync.py", line 78, in run
logger.info("Finishing sync stage '%s'", stage_name)
File "{PATH}/lib/python3.6/site-packages/neo4j/__init__.py", line 374, in __exit__
self.close()
File "{PATH}/lib/python3.6/site-packages/neo4j/__init__.py", line 401, in close
ServiceUnavailable, SessionError):
NameError: name 'ServiceUnavailable' is not defined
|
ConnectionResetError
|
def load_ecr_image_scan_findings(neo4j_session, data, aws_update_tag):
"""
Creates the path (:Risk:CVE:ECRScanFinding)-[:AFFECTS]->(:Package)-[:DEPLOYED]->(:ECRImage)
:param neo4j_session: The Neo4j session object
:param data: A dict that has been run through transform_ecr_scan_finding_attributes().
:param aws_update_tag: The AWS update tag
"""
query = """
UNWIND {Risks} as risk
MATCH (image:ECRImage{id: {ImageDigest}})
MERGE (pkg:Package{id: risk.package_version + "|" + risk.package_name})
ON CREATE SET pkg.firstseen = timestamp(),
pkg.name = risk.package_name,
pkg.version = risk.package_version
SET pkg.lastupdated = {aws_update_tag}
WITH image, risk, pkg
MERGE (pkg)-[r1:DEPLOYED]->(image)
ON CREATE SET r1.firstseen = timestamp()
SET r1.lastupdated = {aws_update_tag}
WITH pkg, risk
MERGE (r:Risk:CVE:ECRScanFinding{id: risk.name})
ON CREATE SET r.firstseen = timestamp(),
r.name = risk.name,
r.severity = risk.severity
SET r.lastupdated = {aws_update_tag},
r.uri = risk.uri,
r.cvss2_score = risk.CVSS2_SCORE
MERGE (r)-[a:AFFECTS]->(pkg)
ON CREATE SET a.firstseen = timestamp()
SET r.lastupdated = {aws_update_tag}
"""
neo4j_session.run(
query,
Risks=data["findings"],
ImageDigest=data["imageDigest"],
aws_update_tag=aws_update_tag,
).consume() # See issue #440
|
def load_ecr_image_scan_findings(neo4j_session, data, aws_update_tag):
"""
Creates the path (:Risk:CVE:ECRScanFinding)-[:AFFECTS]->(:Package)-[:DEPLOYED]->(:ECRImage)
:param neo4j_session: The Neo4j session object
:param data: A dict that has been run through transform_ecr_scan_finding_attributes().
:param aws_update_tag: The AWS update tag
"""
query = """
UNWIND {Risks} as risk
MATCH (image:ECRImage{id: {ImageDigest}})
MERGE (pkg:Package{id: risk.package_version + "|" + risk.package_name})
ON CREATE SET pkg.firstseen = timestamp(),
pkg.name = risk.package_name,
pkg.version = risk.package_version
SET pkg.lastupdated = {aws_update_tag}
WITH image, risk, pkg
MERGE (pkg)-[r1:DEPLOYED]->(image)
ON CREATE SET r1.firstseen = timestamp()
SET r1.lastupdated = {aws_update_tag}
WITH pkg, risk
MERGE (r:Risk:CVE:ECRScanFinding{id: risk.name})
ON CREATE SET r.firstseen = timestamp(),
r.name = risk.name,
r.severity = risk.severity
SET r.lastupdated = {aws_update_tag},
r.uri = risk.uri,
r.cvss2_score = risk.CVSS2_SCORE
MERGE (r)-[a:AFFECTS]->(pkg)
ON CREATE SET a.firstseen = timestamp()
SET r.lastupdated = {aws_update_tag}
"""
neo4j_session.run(
query,
Risks=data["findings"],
ImageDigest=data["imageDigest"],
aws_update_tag=aws_update_tag,
)
|
https://github.com/lyft/cartography/issues/440
|
Traceback (most recent call last):
File "{PATH}/lib/python3.6/site-packages/cartography/sync.py", line 71, in run
stage_func(neo4j_session, config)
File "{OTHER_PATH}/intelmodules/lyft/aws.py", line 234, in lyft_start_aws_ingestion
common_job_parameters
File "{PATH}/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 98, in _sync_multiple_accounts
_sync_one_account(neo4j_session, boto3_session, account_id, sync_tag, common_job_parameters)
File "{PATH}/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 46, in _sync_one_account
ecr.sync(neo4j_session, boto3_session, regions, account_id, sync_tag, common_job_parameters)
File "{PATH}/lib/python3.6/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{PATH}/lib/python3.6/site-packages/cartography/intel/aws/ecr.py", line 241, in sync
load_ecr_repository_images(neo4j_session, image_data, region, aws_update_tag)
File "{PATH}/lib/python3.6/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{PATH}/lib/python3.6/site-packages/cartography/intel/aws/ecr.py", line 171, in load_ecr_repository_images
Region=region,
File "{PATH}/lib/python3.6/site-packages/neo4j/__init__.py", line 502, in run
self._connection.send()
File "{PATH}/lib/python3.6/site-packages/neobolt/direct.py", line 394, in send
self._send()
File "{PATH}/lib/python3.6/site-packages/neobolt/direct.py", line 409, in _send
self.socket.sendall(data)
File "/usr/lib/python3.6/ssl.py", line 965, in sendall
v = self.send(data[count:])
File "/usr/lib/python3.6/ssl.py", line 935, in send
return self._sslobj.write(data)
File "/usr/lib/python3.6/ssl.py", line 636, in write
return self._sslobj.write(data)
ConnectionResetError: [Errno 104] Connection reset by peer
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "{PATH}/lib/python3.6/site-packages/neo4j/__init__.py", line 399, in close
self._connection.sync()
File "{PATH}/lib/python3.6/site-packages/neobolt/direct.py", line 505, in sync
self.send()
File "{PATH}/lib/python3.6/site-packages/neobolt/direct.py", line 394, in send
self._send()
File "{PATH}/lib/python3.6/site-packages/neobolt/direct.py", line 409, in _send
self.socket.sendall(data)
File "/usr/lib/python3.6/ssl.py", line 965, in sendall
v = self.send(data[count:])
File "/usr/lib/python3.6/ssl.py", line 935, in send
return self._sslobj.write(data)
File "/usr/lib/python3.6/ssl.py", line 636, in write
return self._sslobj.write(data)
BrokenPipeError: [Errno 32] Broken pipe
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "{OTHER_PATH}/intelmodules/syncgraph.py", line 220, in <module>
main(argv)
File "{OTHER_PATH}/intelmodules/syncgraph.py", line 210, in main
return cartography.sync.run_with_config(sync, config)
File "{PATH}/lib/python3.6/site-packages/cartography/sync.py", line 145, in run_with_config
return sync.run(neo4j_driver, config)
File "{PATH}/lib/python3.6/site-packages/cartography/sync.py", line 78, in run
logger.info("Finishing sync stage '%s'", stage_name)
File "{PATH}/lib/python3.6/site-packages/neo4j/__init__.py", line 374, in __exit__
self.close()
File "{PATH}/lib/python3.6/site-packages/neo4j/__init__.py", line 401, in close
ServiceUnavailable, SessionError):
NameError: name 'ServiceUnavailable' is not defined
|
ConnectionResetError
|
def get_tgw_attachments(boto3_session, region):
client = boto3_session.client(
"ec2", region_name=region, config=get_botocore_config()
)
tgw_attachments = []
try:
paginator = client.get_paginator("describe_transit_gateway_attachments")
for page in paginator.paginate():
tgw_attachments.extend(page["TransitGatewayAttachments"])
except botocore.exceptions.ClientError as e:
logger.warning(
"Could not retrieve Transit Gateway Attachments due to boto3 error %s: %s. Skipping.",
e.response["Error"]["Code"],
e.response["Error"]["Message"],
)
return tgw_attachments
|
def get_tgw_attachments(boto3_session, region):
client = boto3_session.client(
"ec2", region_name=region, config=get_botocore_config()
)
paginator = client.get_paginator("describe_transit_gateway_attachments")
tgw_attachments = []
for page in paginator.paginate():
tgw_attachments.extend(page["TransitGatewayAttachments"])
return tgw_attachments
|
https://github.com/lyft/cartography/issues/428
|
$ cartography --neo4j-uri bolt://localhost:7687
INFO:cartography.sync:Starting sync with update tag '1603410623'
INFO:cartography.sync:Starting sync stage 'create-indexes'
INFO:cartography.intel.create_indexes:Creating indexes for cartography node types.
INFO:cartography.sync:Finishing sync stage 'create-indexes'
INFO:cartography.sync:Starting sync stage 'aws'
INFO:cartography.intel.aws:Syncing AWS account with ID '123456789012' using configured profile 'default'.
INFO:cartography.intel.aws:Trying to autodiscover accounts.
INFO:cartography.intel.aws.iam:Syncing IAM for account '123456789012'.
INFO:cartography.intel.aws.s3:Syncing S3 for account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'eu-north-1' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'ap-south-1' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'eu-west-3' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'eu-west-2' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'eu-west-1' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'ap-northeast-3' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'ap-northeast-2' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'ap-northeast-1' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'sa-east-1' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'ca-central-1' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'ap-east-1' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'ap-southeast-1' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'ap-southeast-2' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'eu-central-1' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'us-east-1' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'us-east-2' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'us-west-1' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'us-west-2' in account '123456789012'.
INFO:cartography.intel.aws.ec2:Syncing EC2 for account '123456789012'.
WARNING:cartography.intel.aws.ec2.tgw:Could not retrieve Transit Gateways due to boto3 error InvalidAction: The action DescribeTransitGateways is not valid for this web service.. Skipping.
ERROR:cartography.sync:Unhandled exception during sync stage 'aws'
Traceback (most recent call last):
File "/fakepath/.local/lib/python3.6/site-packages/cartography/sync.py", line 71, in run
stage_func(neo4j_session, config)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/util.py", line 63, in timed
return method(*args, **kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 148, in start_aws_ingestion
_sync_multiple_accounts(neo4j_session, aws_accounts, config.update_tag, common_job_parameters)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 96, in _sync_multiple_accounts
_sync_one_account(neo4j_session, boto3_session, account_id, sync_tag, common_job_parameters)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 44, in _sync_one_account
ec2.sync(neo4j_session, boto3_session, regions, account_id, sync_tag, common_job_parameters)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/util.py", line 63, in timed
return method(*args, **kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/intel/aws/ec2/__init__.py", line 37, in sync
sync_transit_gateways(neo4j_session, boto3_session, regions, account_id, sync_tag, common_job_parameters)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/util.py", line 63, in timed
return method(*args, **kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/intel/aws/ec2/tgw.py", line 228, in sync_transit_gateways
tgw_attachments = get_tgw_attachments(boto3_session, region)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/util.py", line 63, in timed
return method(*args, **kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/util.py", line 78, in inner_function
return func(*args, **kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/intel/aws/ec2/tgw.py", line 36, in get_tgw_attachments
for page in paginator.paginate():
File "/fakepath/.local/lib/python3.6/site-packages/botocore/paginate.py", line 255, in __iter__
response = self._make_request(current_kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/botocore/paginate.py", line 332, in _make_request
return self._method(**current_kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/botocore/client.py", line 676, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (InvalidAction) when calling the DescribeTransitGatewayAttachments operation: The action DescribeTransitGatewayAttachments is not valid for this web service.
Traceback (most recent call last):
File "/fakepath/.local/bin/cartography", line 11, in <module>
sys.exit(main())
File "/fakepath/.local/lib/python3.6/site-packages/cartography/cli.py", line 348, in main
return CLI(default_sync, prog='cartography').main(argv)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/cli.py", line 328, in main
return cartography.sync.run_with_config(self.sync, config)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/sync.py", line 145, in run_with_config
return sync.run(neo4j_driver, config)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/sync.py", line 71, in run
stage_func(neo4j_session, config)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/util.py", line 63, in timed
return method(*args, **kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 148, in start_aws_ingestion
_sync_multiple_accounts(neo4j_session, aws_accounts, config.update_tag, common_job_parameters)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 96, in _sync_multiple_accounts
_sync_one_account(neo4j_session, boto3_session, account_id, sync_tag, common_job_parameters)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 44, in _sync_one_account
ec2.sync(neo4j_session, boto3_session, regions, account_id, sync_tag, common_job_parameters)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/util.py", line 63, in timed
return method(*args, **kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/intel/aws/ec2/__init__.py", line 37, in sync
sync_transit_gateways(neo4j_session, boto3_session, regions, account_id, sync_tag, common_job_parameters)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/util.py", line 63, in timed
return method(*args, **kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/intel/aws/ec2/tgw.py", line 228, in sync_transit_gateways
tgw_attachments = get_tgw_attachments(boto3_session, region)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/util.py", line 63, in timed
return method(*args, **kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/util.py", line 78, in inner_function
return func(*args, **kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/intel/aws/ec2/tgw.py", line 36, in get_tgw_attachments
for page in paginator.paginate():
File "/fakepath/.local/lib/python3.6/site-packages/botocore/paginate.py", line 255, in __iter__
response = self._make_request(current_kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/botocore/paginate.py", line 332, in _make_request
return self._method(**current_kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/botocore/client.py", line 676, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (InvalidAction) when calling the DescribeTransitGatewayAttachments operation: The action DescribeTransitGatewayAttachments is not valid for this web service.
|
botocore.exceptions.ClientError
|
def get_tgw_vpc_attachments(boto3_session, region):
client = boto3_session.client(
"ec2", region_name=region, config=get_botocore_config()
)
tgw_vpc_attachments = []
try:
paginator = client.get_paginator("describe_transit_gateway_vpc_attachments")
for page in paginator.paginate():
tgw_vpc_attachments.extend(page["TransitGatewayVpcAttachments"])
except botocore.exceptions.ClientError as e:
logger.warning(
"Could not retrieve Transit Gateway VPC Attachments due to boto3 error %s: %s. Skipping.",
e.response["Error"]["Code"],
e.response["Error"]["Message"],
)
return tgw_vpc_attachments
|
def get_tgw_vpc_attachments(boto3_session, region):
client = boto3_session.client(
"ec2", region_name=region, config=get_botocore_config()
)
paginator = client.get_paginator("describe_transit_gateway_vpc_attachments")
tgw_vpc_attachments = []
for page in paginator.paginate():
tgw_vpc_attachments.extend(page["TransitGatewayVpcAttachments"])
return tgw_vpc_attachments
|
https://github.com/lyft/cartography/issues/428
|
$ cartography --neo4j-uri bolt://localhost:7687
INFO:cartography.sync:Starting sync with update tag '1603410623'
INFO:cartography.sync:Starting sync stage 'create-indexes'
INFO:cartography.intel.create_indexes:Creating indexes for cartography node types.
INFO:cartography.sync:Finishing sync stage 'create-indexes'
INFO:cartography.sync:Starting sync stage 'aws'
INFO:cartography.intel.aws:Syncing AWS account with ID '123456789012' using configured profile 'default'.
INFO:cartography.intel.aws:Trying to autodiscover accounts.
INFO:cartography.intel.aws.iam:Syncing IAM for account '123456789012'.
INFO:cartography.intel.aws.s3:Syncing S3 for account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'eu-north-1' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'ap-south-1' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'eu-west-3' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'eu-west-2' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'eu-west-1' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'ap-northeast-3' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'ap-northeast-2' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'ap-northeast-1' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'sa-east-1' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'ca-central-1' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'ap-east-1' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'ap-southeast-1' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'ap-southeast-2' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'eu-central-1' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'us-east-1' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'us-east-2' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'us-west-1' in account '123456789012'.
INFO:cartography.intel.aws.dynamodb:Syncing DynamoDB for region in 'us-west-2' in account '123456789012'.
INFO:cartography.intel.aws.ec2:Syncing EC2 for account '123456789012'.
WARNING:cartography.intel.aws.ec2.tgw:Could not retrieve Transit Gateways due to boto3 error InvalidAction: The action DescribeTransitGateways is not valid for this web service.. Skipping.
ERROR:cartography.sync:Unhandled exception during sync stage 'aws'
Traceback (most recent call last):
File "/fakepath/.local/lib/python3.6/site-packages/cartography/sync.py", line 71, in run
stage_func(neo4j_session, config)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/util.py", line 63, in timed
return method(*args, **kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 148, in start_aws_ingestion
_sync_multiple_accounts(neo4j_session, aws_accounts, config.update_tag, common_job_parameters)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 96, in _sync_multiple_accounts
_sync_one_account(neo4j_session, boto3_session, account_id, sync_tag, common_job_parameters)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 44, in _sync_one_account
ec2.sync(neo4j_session, boto3_session, regions, account_id, sync_tag, common_job_parameters)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/util.py", line 63, in timed
return method(*args, **kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/intel/aws/ec2/__init__.py", line 37, in sync
sync_transit_gateways(neo4j_session, boto3_session, regions, account_id, sync_tag, common_job_parameters)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/util.py", line 63, in timed
return method(*args, **kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/intel/aws/ec2/tgw.py", line 228, in sync_transit_gateways
tgw_attachments = get_tgw_attachments(boto3_session, region)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/util.py", line 63, in timed
return method(*args, **kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/util.py", line 78, in inner_function
return func(*args, **kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/intel/aws/ec2/tgw.py", line 36, in get_tgw_attachments
for page in paginator.paginate():
File "/fakepath/.local/lib/python3.6/site-packages/botocore/paginate.py", line 255, in __iter__
response = self._make_request(current_kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/botocore/paginate.py", line 332, in _make_request
return self._method(**current_kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/botocore/client.py", line 676, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (InvalidAction) when calling the DescribeTransitGatewayAttachments operation: The action DescribeTransitGatewayAttachments is not valid for this web service.
Traceback (most recent call last):
File "/fakepath/.local/bin/cartography", line 11, in <module>
sys.exit(main())
File "/fakepath/.local/lib/python3.6/site-packages/cartography/cli.py", line 348, in main
return CLI(default_sync, prog='cartography').main(argv)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/cli.py", line 328, in main
return cartography.sync.run_with_config(self.sync, config)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/sync.py", line 145, in run_with_config
return sync.run(neo4j_driver, config)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/sync.py", line 71, in run
stage_func(neo4j_session, config)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/util.py", line 63, in timed
return method(*args, **kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 148, in start_aws_ingestion
_sync_multiple_accounts(neo4j_session, aws_accounts, config.update_tag, common_job_parameters)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 96, in _sync_multiple_accounts
_sync_one_account(neo4j_session, boto3_session, account_id, sync_tag, common_job_parameters)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 44, in _sync_one_account
ec2.sync(neo4j_session, boto3_session, regions, account_id, sync_tag, common_job_parameters)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/util.py", line 63, in timed
return method(*args, **kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/intel/aws/ec2/__init__.py", line 37, in sync
sync_transit_gateways(neo4j_session, boto3_session, regions, account_id, sync_tag, common_job_parameters)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/util.py", line 63, in timed
return method(*args, **kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/intel/aws/ec2/tgw.py", line 228, in sync_transit_gateways
tgw_attachments = get_tgw_attachments(boto3_session, region)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/util.py", line 63, in timed
return method(*args, **kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/util.py", line 78, in inner_function
return func(*args, **kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/cartography/intel/aws/ec2/tgw.py", line 36, in get_tgw_attachments
for page in paginator.paginate():
File "/fakepath/.local/lib/python3.6/site-packages/botocore/paginate.py", line 255, in __iter__
response = self._make_request(current_kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/botocore/paginate.py", line 332, in _make_request
return self._method(**current_kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/fakepath/.local/lib/python3.6/site-packages/botocore/client.py", line 676, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (InvalidAction) when calling the DescribeTransitGatewayAttachments operation: The action DescribeTransitGatewayAttachments is not valid for this web service.
|
botocore.exceptions.ClientError
|
def get_role_policy_data(boto3_session, role_list):
resource_client = boto3_session.resource("iam")
policies = {}
for role in role_list:
name = role["RoleName"]
arn = role["Arn"]
resource_role = resource_client.Role(name)
try:
policies[arn] = {
p.name: p.policy_document["Statement"]
for p in resource_role.policies.all()
}
except resource_client.meta.client.exceptions.NoSuchEntityException:
logger.warning(
f"Could not get policies for role {name} due to NoSuchEntityException; skipping.",
)
return policies
|
def get_role_policy_data(boto3_session, role_list):
resource_client = boto3_session.resource("iam")
policies = {}
for role in role_list:
name = role["RoleName"]
arn = role["Arn"]
resource_role = resource_client.Role(name)
policies[arn] = {
p.name: p.policy_document["Statement"] for p in resource_role.policies.all()
}
return policies
|
https://github.com/lyft/cartography/issues/406
|
Traceback (most recent call last):
File "{PATH}/intelmodules/syncgraph.py", line 220, in <module>
main(argv)
File "{PATH}/intelmodules/syncgraph.py", line 210, in main
return cartography.sync.run_with_config(sync, config)
File "/lib/python3.6/site-packages/cartography/sync.py", line 145, in run_with_config
return sync.run(neo4j_driver, config)
File "/lib/python3.6/site-packages/cartography/sync.py", line 71, in run
stage_func(neo4j_session, config)
File "{PATH}/intelmodules/lyft/aws.py", line 234, in lyft_start_aws_ingestion
common_job_parameters
File "/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 96, in _sync_multiple_accounts
_sync_one_account(neo4j_session, boto3_session, account_id, sync_tag, common_job_parameters)
File "/lib/python3.6/site-packages/cartography/intel/aws/__init__.py", line 27, in _sync_one_account
iam.sync(neo4j_session, boto3_session, account_id, sync_tag, common_job_parameters)
File "/lib/python3.6/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "/lib/python3.6/site-packages/cartography/intel/aws/iam.py", line 656, in sync
sync_roles(neo4j_session, boto3_session, account_id, update_tag, common_job_parameters)
File "/lib/python3.6/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "/lib/python3.6/site-packages/cartography/intel/aws/iam.py", line 595, in sync_roles
sync_role_inline_policies(current_aws_account_id, boto3_session, data, neo4j_session, aws_update_tag)
File "/lib/python3.6/site-packages/cartography/intel/aws/iam.py", line 611, in sync_role_inline_policies
inline_policy_data = get_role_policy_data(boto3_session, data["Roles"])
File "/lib/python3.6/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "/lib/python3.6/site-packages/cartography/intel/aws/iam.py", line 120, in get_role_policy_data
policies[arn] = {p.name: p.policy_document["Statement"] for p in resource_role.policies.all()}
File "/lib/python3.6/site-packages/cartography/intel/aws/iam.py", line 120, in <dictcomp>
policies[arn] = {p.name: p.policy_document["Statement"] for p in resource_role.policies.all()}
File "/lib/python3.6/site-packages/boto3/resources/collection.py", line 83, in __iter__
for page in self.pages():
File "/lib/python3.6/site-packages/boto3/resources/collection.py", line 166, in pages
for page in pages:
File "/lib/python3.6/site-packages/botocore/paginate.py", line 255, in __iter__
response = self._make_request(current_kwargs)
File "/lib/python3.6/site-packages/botocore/paginate.py", line 332, in _make_request
return self._method(**current_kwargs)
File "/lib/python3.6/site-packages/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/lib/python3.6/site-packages/botocore/client.py", line 661, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.errorfactory.NoSuchEntityException: An error occurred (NoSuchEntity) when calling the ListRolePolicies operation: The role with name NAME_HERE cannot be found.
|
botocore.errorfactory.NoSuchEntityException
|
def _services_enabled_on_project(serviceusage, project_id):
"""
Return a list of all Google API services that are enabled on the given project ID.
See https://cloud.google.com/service-usage/docs/reference/rest/v1/services/list for data shape.
:param serviceusage: the serviceusage resource provider. See https://cloud.google.com/service-usage/docs/overview.
:param project_id: The project ID number to sync. See the `projectId` field in
https://cloud.google.com/resource-manager/reference/rest/v1/projects
:return: A set of services that are enabled on the project
"""
try:
req = serviceusage.services().list(
parent=f"projects/{project_id}", filter="state:ENABLED"
)
res = req.execute()
if "services" in res:
return {svc["config"]["name"] for svc in res["services"]}
else:
return {}
except googleapiclient.discovery.HttpError as http_error:
http_error = json.loads(http_error.content.decode("utf-8"))
# This is set to log-level `info` because Google creates many projects under the hood that cartography cannot
# audit (e.g. adding a script to a Google spreadsheet causes a project to get created) and we don't need to emit
# a warning for these projects.
logger.info(
f"HttpError when trying to get enabled services on project {project_id}. "
f"Code: {http_error['error']['code']}, Message: {http_error['error']['message']}. "
f"Skipping.",
)
return {}
|
def _services_enabled_on_project(serviceusage, project_id):
"""
Return a list of all Google API services that are enabled on the given project ID.
See https://cloud.google.com/service-usage/docs/reference/rest/v1/services/list for data shape.
:param serviceusage: the serviceusage resource provider. See https://cloud.google.com/service-usage/docs/overview.
:param project_id: The project ID number to sync. See the `projectId` field in
https://cloud.google.com/resource-manager/reference/rest/v1/projects
:return: A set of services that are enabled on the project
"""
try:
req = serviceusage.services().list(
parent=f"projects/{project_id}", filter="state:ENABLED"
)
res = req.execute()
if "services" in res:
return {svc["config"]["name"] for svc in res["services"]}
else:
return {}
except googleapiclient.discovery.HttpError as http_error:
http_error = json.loads(http_error.content.decode("utf-8"))
logger.warning(
f"HttpError when trying to get enabled services on project {project_id}. "
f"Code: {http_error['error']['code']}, Message: {http_error['error']['message']}. "
f"Skipping.",
)
return {}
|
https://github.com/lyft/cartography/issues/377
|
Traceback (most recent call last):
File "{path}/site-packages/cartography/sync.py", line 71, in run
stage_func(neo4j_session, config)
File "{path}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{path}/site-packages/cartography/intel/gcp/__init__.py", line 218, in start_gcp_ingestion
_sync_multiple_projects(neo4j_session, resources, projects, config.update_tag, common_job_parameters)
File "{path}/site-packages/cartography/intel/gcp/__init__.py", line 177, in _sync_multiple_projects
_sync_single_project(neo4j_session, resources, project_id, gcp_update_tag, common_job_parameters)
File "{path}/site-packages/cartography/intel/gcp/__init__.py", line 155, in _sync_single_project
gke.sync_gke_clusters(neo4j_session, resources.container, project_id, gcp_update_tag, common_job_parameters)
File "{path}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{path}/site-packages/cartography/intel/gcp/gke.py", line 197, in sync_gke_clusters
gke_res = get_gke_clusters(container, project_id)
File "{path}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{path}/site-packages/cartography/intel/gcp/gke.py", line 28, in get_gke_clusters
res = req.execute()
File "{path}/site-packages/googleapiclient/_helpers.py", line 134, in positional_wrapper
return wrapped(*args, **kwargs)
File "{path}/site-packages/googleapiclient/http.py", line 907, in execute
raise HttpError(resp, content, uri=self.uri)
googleapiclient.errors.HttpError: <HttpError 403 when requesting https://container.googleapis.com/v1/projects/{ProjectName}}/zones/-/clusters?alt=json returned "Required "container.clusters.list" permission(s) for "projects/{ProjectName}".">
|
googleapiclient.errors.HttpError
|
def sync_gcp_instances(
neo4j_session, compute, project_id, zones, gcp_update_tag, common_job_parameters
):
"""
Get GCP instances using the Compute resource object, ingest to Neo4j, and clean up old data.
:param neo4j_session: The Neo4j session object
:param compute: The GCP Compute resource object
:param project_id: The project ID number to sync. See the `projectId` field in
https://cloud.google.com/resource-manager/reference/rest/v1/projects
:param zones: The list of all zone names that are enabled for this project; this is the output of
`get_zones_in_project()`
:param gcp_update_tag: The timestamp value to set our new Neo4j nodes with
:param common_job_parameters: dict of other job parameters to pass to Neo4j
:return: Nothing
"""
instance_responses = get_gcp_instance_responses(project_id, zones, compute)
instance_list = transform_gcp_instances(instance_responses)
load_gcp_instances(neo4j_session, instance_list, gcp_update_tag)
# TODO scope the cleanup to the current project - https://github.com/lyft/cartography/issues/381
cleanup_gcp_instances(neo4j_session, common_job_parameters)
|
def sync_gcp_instances(
neo4j_session, compute, project_id, zones, gcp_update_tag, common_job_parameters
):
"""
Get GCP instances using the Compute resource object, ingest to Neo4j, and clean up old data.
:param neo4j_session: The Neo4j session object
:param compute: The GCP Compute resource object
:param project_id: The project ID number to sync. See the `projectId` field in
https://cloud.google.com/resource-manager/reference/rest/v1/projects
:param zones: The list of all zone names that are enabled for this project; this is the output of
`get_zones_in_project()`
:param gcp_update_tag: The timestamp value to set our new Neo4j nodes with
:param common_job_parameters: dict of other job parameters to pass to Neo4j
:return: Nothing
"""
instance_responses = get_gcp_instance_responses(project_id, zones, compute)
instance_list = transform_gcp_instances(instance_responses)
load_gcp_instances(neo4j_session, instance_list, gcp_update_tag)
cleanup_gcp_instances(neo4j_session, common_job_parameters)
|
https://github.com/lyft/cartography/issues/377
|
Traceback (most recent call last):
File "{path}/site-packages/cartography/sync.py", line 71, in run
stage_func(neo4j_session, config)
File "{path}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{path}/site-packages/cartography/intel/gcp/__init__.py", line 218, in start_gcp_ingestion
_sync_multiple_projects(neo4j_session, resources, projects, config.update_tag, common_job_parameters)
File "{path}/site-packages/cartography/intel/gcp/__init__.py", line 177, in _sync_multiple_projects
_sync_single_project(neo4j_session, resources, project_id, gcp_update_tag, common_job_parameters)
File "{path}/site-packages/cartography/intel/gcp/__init__.py", line 155, in _sync_single_project
gke.sync_gke_clusters(neo4j_session, resources.container, project_id, gcp_update_tag, common_job_parameters)
File "{path}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{path}/site-packages/cartography/intel/gcp/gke.py", line 197, in sync_gke_clusters
gke_res = get_gke_clusters(container, project_id)
File "{path}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{path}/site-packages/cartography/intel/gcp/gke.py", line 28, in get_gke_clusters
res = req.execute()
File "{path}/site-packages/googleapiclient/_helpers.py", line 134, in positional_wrapper
return wrapped(*args, **kwargs)
File "{path}/site-packages/googleapiclient/http.py", line 907, in execute
raise HttpError(resp, content, uri=self.uri)
googleapiclient.errors.HttpError: <HttpError 403 when requesting https://container.googleapis.com/v1/projects/{ProjectName}}/zones/-/clusters?alt=json returned "Required "container.clusters.list" permission(s) for "projects/{ProjectName}".">
|
googleapiclient.errors.HttpError
|
def sync_gcp_vpcs(
neo4j_session, compute, project_id, gcp_update_tag, common_job_parameters
):
"""
Get GCP VPCs, ingest to Neo4j, and clean up old data.
:param neo4j_session: The Neo4j session
:param compute: The GCP Compute resource object
:param project_id: The project ID to sync
:param gcp_update_tag: The timestamp value to set our new Neo4j nodes with
:param common_job_parameters: dict of other job parameters to pass to Neo4j
:return: Nothing
"""
vpc_res = get_gcp_vpcs(project_id, compute)
vpcs = transform_gcp_vpcs(vpc_res)
load_gcp_vpcs(neo4j_session, vpcs, gcp_update_tag)
# TODO scope the cleanup to the current project - https://github.com/lyft/cartography/issues/381
cleanup_gcp_vpcs(neo4j_session, common_job_parameters)
|
def sync_gcp_vpcs(
neo4j_session, compute, project_id, gcp_update_tag, common_job_parameters
):
"""
Get GCP VPCs, ingest to Neo4j, and clean up old data.
:param neo4j_session: The Neo4j session
:param compute: The GCP Compute resource object
:param project_id: The project ID to sync
:param gcp_update_tag: The timestamp value to set our new Neo4j nodes with
:param common_job_parameters: dict of other job parameters to pass to Neo4j
:return: Nothing
"""
vpc_res = get_gcp_vpcs(project_id, compute)
vpcs = transform_gcp_vpcs(vpc_res)
load_gcp_vpcs(neo4j_session, vpcs, gcp_update_tag)
cleanup_gcp_vpcs(neo4j_session, common_job_parameters)
|
https://github.com/lyft/cartography/issues/377
|
Traceback (most recent call last):
File "{path}/site-packages/cartography/sync.py", line 71, in run
stage_func(neo4j_session, config)
File "{path}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{path}/site-packages/cartography/intel/gcp/__init__.py", line 218, in start_gcp_ingestion
_sync_multiple_projects(neo4j_session, resources, projects, config.update_tag, common_job_parameters)
File "{path}/site-packages/cartography/intel/gcp/__init__.py", line 177, in _sync_multiple_projects
_sync_single_project(neo4j_session, resources, project_id, gcp_update_tag, common_job_parameters)
File "{path}/site-packages/cartography/intel/gcp/__init__.py", line 155, in _sync_single_project
gke.sync_gke_clusters(neo4j_session, resources.container, project_id, gcp_update_tag, common_job_parameters)
File "{path}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{path}/site-packages/cartography/intel/gcp/gke.py", line 197, in sync_gke_clusters
gke_res = get_gke_clusters(container, project_id)
File "{path}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{path}/site-packages/cartography/intel/gcp/gke.py", line 28, in get_gke_clusters
res = req.execute()
File "{path}/site-packages/googleapiclient/_helpers.py", line 134, in positional_wrapper
return wrapped(*args, **kwargs)
File "{path}/site-packages/googleapiclient/http.py", line 907, in execute
raise HttpError(resp, content, uri=self.uri)
googleapiclient.errors.HttpError: <HttpError 403 when requesting https://container.googleapis.com/v1/projects/{ProjectName}}/zones/-/clusters?alt=json returned "Required "container.clusters.list" permission(s) for "projects/{ProjectName}".">
|
googleapiclient.errors.HttpError
|
def sync_gcp_subnets(
neo4j_session, compute, project_id, regions, gcp_update_tag, common_job_parameters
):
for r in regions:
subnet_res = get_gcp_subnets(project_id, r, compute)
subnets = transform_gcp_subnets(subnet_res)
load_gcp_subnets(neo4j_session, subnets, gcp_update_tag)
# TODO scope the cleanup to the current project - https://github.com/lyft/cartography/issues/381
cleanup_gcp_subnets(neo4j_session, common_job_parameters)
|
def sync_gcp_subnets(
neo4j_session, compute, project_id, regions, gcp_update_tag, common_job_parameters
):
for r in regions:
subnet_res = get_gcp_subnets(project_id, r, compute)
subnets = transform_gcp_subnets(subnet_res)
load_gcp_subnets(neo4j_session, subnets, gcp_update_tag)
cleanup_gcp_subnets(neo4j_session, common_job_parameters)
|
https://github.com/lyft/cartography/issues/377
|
Traceback (most recent call last):
File "{path}/site-packages/cartography/sync.py", line 71, in run
stage_func(neo4j_session, config)
File "{path}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{path}/site-packages/cartography/intel/gcp/__init__.py", line 218, in start_gcp_ingestion
_sync_multiple_projects(neo4j_session, resources, projects, config.update_tag, common_job_parameters)
File "{path}/site-packages/cartography/intel/gcp/__init__.py", line 177, in _sync_multiple_projects
_sync_single_project(neo4j_session, resources, project_id, gcp_update_tag, common_job_parameters)
File "{path}/site-packages/cartography/intel/gcp/__init__.py", line 155, in _sync_single_project
gke.sync_gke_clusters(neo4j_session, resources.container, project_id, gcp_update_tag, common_job_parameters)
File "{path}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{path}/site-packages/cartography/intel/gcp/gke.py", line 197, in sync_gke_clusters
gke_res = get_gke_clusters(container, project_id)
File "{path}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{path}/site-packages/cartography/intel/gcp/gke.py", line 28, in get_gke_clusters
res = req.execute()
File "{path}/site-packages/googleapiclient/_helpers.py", line 134, in positional_wrapper
return wrapped(*args, **kwargs)
File "{path}/site-packages/googleapiclient/http.py", line 907, in execute
raise HttpError(resp, content, uri=self.uri)
googleapiclient.errors.HttpError: <HttpError 403 when requesting https://container.googleapis.com/v1/projects/{ProjectName}}/zones/-/clusters?alt=json returned "Required "container.clusters.list" permission(s) for "projects/{ProjectName}".">
|
googleapiclient.errors.HttpError
|
def sync_gcp_firewall_rules(
neo4j_session, compute, project_id, gcp_update_tag, common_job_parameters
):
"""
Sync GCP firewalls
:param neo4j_session: The Neo4j session
:param compute: The Compute resource object
:param project_id: The project ID that the firewalls are in
:param common_job_parameters: dict of other job params to pass to Neo4j
:return: Nothing
"""
fw_response = get_gcp_firewall_ingress_rules(project_id, compute)
fw_list = transform_gcp_firewall(fw_response)
load_gcp_ingress_firewalls(neo4j_session, fw_list, gcp_update_tag)
# TODO scope the cleanup to the current project - https://github.com/lyft/cartography/issues/381
cleanup_gcp_firewall_rules(neo4j_session, common_job_parameters)
|
def sync_gcp_firewall_rules(
neo4j_session, compute, project_id, gcp_update_tag, common_job_parameters
):
"""
Sync GCP firewalls
:param neo4j_session: The Neo4j session
:param compute: The Compute resource object
:param project_id: The project ID that the firewalls are in
:param common_job_parameters: dict of other job params to pass to Neo4j
:return: Nothing
"""
fw_response = get_gcp_firewall_ingress_rules(project_id, compute)
fw_list = transform_gcp_firewall(fw_response)
load_gcp_ingress_firewalls(neo4j_session, fw_list, gcp_update_tag)
cleanup_gcp_firewall_rules(neo4j_session, common_job_parameters)
|
https://github.com/lyft/cartography/issues/377
|
Traceback (most recent call last):
File "{path}/site-packages/cartography/sync.py", line 71, in run
stage_func(neo4j_session, config)
File "{path}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{path}/site-packages/cartography/intel/gcp/__init__.py", line 218, in start_gcp_ingestion
_sync_multiple_projects(neo4j_session, resources, projects, config.update_tag, common_job_parameters)
File "{path}/site-packages/cartography/intel/gcp/__init__.py", line 177, in _sync_multiple_projects
_sync_single_project(neo4j_session, resources, project_id, gcp_update_tag, common_job_parameters)
File "{path}/site-packages/cartography/intel/gcp/__init__.py", line 155, in _sync_single_project
gke.sync_gke_clusters(neo4j_session, resources.container, project_id, gcp_update_tag, common_job_parameters)
File "{path}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{path}/site-packages/cartography/intel/gcp/gke.py", line 197, in sync_gke_clusters
gke_res = get_gke_clusters(container, project_id)
File "{path}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{path}/site-packages/cartography/intel/gcp/gke.py", line 28, in get_gke_clusters
res = req.execute()
File "{path}/site-packages/googleapiclient/_helpers.py", line 134, in positional_wrapper
return wrapped(*args, **kwargs)
File "{path}/site-packages/googleapiclient/http.py", line 907, in execute
raise HttpError(resp, content, uri=self.uri)
googleapiclient.errors.HttpError: <HttpError 403 when requesting https://container.googleapis.com/v1/projects/{ProjectName}}/zones/-/clusters?alt=json returned "Required "container.clusters.list" permission(s) for "projects/{ProjectName}".">
|
googleapiclient.errors.HttpError
|
def get_gke_clusters(container, project_id):
"""
Returns a GCP response object containing a list of GKE clusters within the given project.
:type container: The GCP Container resource object
:param container: The Container resource object created by googleapiclient.discovery.build()
:type project_id: str
:param project_id: The Google Project Id that you are retrieving clusters from
:rtype: Cluster Object
:return: Cluster response object
"""
try:
req = (
container.projects().zones().clusters().list(projectId=project_id, zone="-")
)
res = req.execute()
return res
except HttpError as e:
err = json.loads(e.content.decode("utf-8"))["error"]
if err["status"] == "PERMISSION_DENIED":
logger.warning(
(
"Could not retrieve GKE clusters on project %s due to permissions issue. Code: %s, Message: %s"
),
project_id,
err["code"],
err["message"],
)
return {}
else:
raise
|
def get_gke_clusters(container, project_id):
"""
Returns a list of GKE clusters within some given project.
:type container: The GCP Container resource object
:param container: The Container resource object created by googleapiclient.discovery.build()
:type project_id: str
:param project_id: The Google Project Id that you are retrieving clusters from
:rtype: Cluster Object
:return: Cluster response object
"""
try:
req = (
container.projects().zones().clusters().list(projectId=project_id, zone="-")
)
res = req.execute()
return res
except HttpError as e:
reason = compute._get_error_reason(e)
if reason == "invalid":
logger.warning(
(
"The project %s is invalid - returned a 400 invalid error."
"Full details: %s"
),
project_id,
e,
)
return {}
elif reason == "forbidden":
logger.warning(
(
"You do not have container.projects.zones.clusters.list access to the project %s. "
"Full details: %s"
),
project_id,
e,
)
return {}
else:
raise
|
https://github.com/lyft/cartography/issues/377
|
Traceback (most recent call last):
File "{path}/site-packages/cartography/sync.py", line 71, in run
stage_func(neo4j_session, config)
File "{path}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{path}/site-packages/cartography/intel/gcp/__init__.py", line 218, in start_gcp_ingestion
_sync_multiple_projects(neo4j_session, resources, projects, config.update_tag, common_job_parameters)
File "{path}/site-packages/cartography/intel/gcp/__init__.py", line 177, in _sync_multiple_projects
_sync_single_project(neo4j_session, resources, project_id, gcp_update_tag, common_job_parameters)
File "{path}/site-packages/cartography/intel/gcp/__init__.py", line 155, in _sync_single_project
gke.sync_gke_clusters(neo4j_session, resources.container, project_id, gcp_update_tag, common_job_parameters)
File "{path}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{path}/site-packages/cartography/intel/gcp/gke.py", line 197, in sync_gke_clusters
gke_res = get_gke_clusters(container, project_id)
File "{path}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{path}/site-packages/cartography/intel/gcp/gke.py", line 28, in get_gke_clusters
res = req.execute()
File "{path}/site-packages/googleapiclient/_helpers.py", line 134, in positional_wrapper
return wrapped(*args, **kwargs)
File "{path}/site-packages/googleapiclient/http.py", line 907, in execute
raise HttpError(resp, content, uri=self.uri)
googleapiclient.errors.HttpError: <HttpError 403 when requesting https://container.googleapis.com/v1/projects/{ProjectName}}/zones/-/clusters?alt=json returned "Required "container.clusters.list" permission(s) for "projects/{ProjectName}".">
|
googleapiclient.errors.HttpError
|
def load_gke_clusters(neo4j_session, cluster_resp, project_id, gcp_update_tag):
"""
Ingest GCP GKE Clusters to Neo4j
:type neo4j_session: Neo4j session object
:param neo4j session: The Neo4j session object
:type cluster_resp: Dict
:param cluster_resp: A cluster response object from the GKE API
:type gcp_update_tag: timestamp
:param gcp_update_tag: The timestamp value to set our new Neo4j nodes with
:rtype: NoneType
:return: Nothing
"""
query = """
MERGE(cluster:GKECluster{id:{ClusterSelfLink}})
ON CREATE SET
cluster.firstseen = timestamp(),
cluster.created_at = {ClusterCreateTime}
SET
cluster.name = {ClusterName},
cluster.self_link = {ClusterSelfLink},
cluster.description = {ClusterDescription},
cluster.logging_service = {ClusterLoggingService},
cluster.monitoring_service = {ClusterMonitoringService},
cluster.network = {ClusterNetwork},
cluster.subnetwork = {ClusterSubnetwork},
cluster.cluster_ipv4cidr = {ClusterIPv4Cidr},
cluster.zone = {ClusterZone},
cluster.location = {ClusterLocation},
cluster.endpoint = {ClusterEndpoint},
cluster.initial_version = {ClusterInitialVersion},
cluster.current_master_version = {ClusterMasterVersion},
cluster.status = {ClusterStatus},
cluster.services_ipv4cidr = {ClusterServicesIPv4Cidr},
cluster.database_encryption = {ClusterDatabaseEncryption},
cluster.network_policy = {ClusterNetworkPolicy},
cluster.master_authorized_networks = {ClusterMasterAuthorizedNetworks},
cluster.legacy_abac = {ClusterAbac},
cluster.shielded_nodes = {ClusterShieldedNodes},
cluster.private_nodes = {ClusterPrivateNodes},
cluster.private_endpoint_enabled = {ClusterPrivateEndpointEnabled},
cluster.private_endpoint = {ClusterPrivateEndpoint},
cluster.public_endpoint = {ClusterPublicEndpoint},
cluster.masterauth_username = {ClusterMasterUsername},
cluster.masterauth_password = {ClusterMasterPassword}
WITH cluster
MATCH (owner:GCPProject{id:{ProjectId}})
MERGE (owner)-[r:RESOURCE]->(cluster)
ON CREATE SET r.firstseen = timestamp()
SET r.lastupdated = {gcp_update_tag}
"""
for cluster in cluster_resp.get("clusters", []):
neo4j_session.run(
query,
ProjectId=project_id,
ClusterSelfLink=cluster["selfLink"],
ClusterCreateTime=cluster["createTime"],
ClusterName=cluster["name"],
ClusterDescription=cluster.get("description"),
ClusterLoggingService=cluster.get("loggingService"),
ClusterMonitoringService=cluster.get("monitoringService"),
ClusterNetwork=cluster.get("network"),
ClusterSubnetwork=cluster.get("subnetwork"),
ClusterIPv4Cidr=cluster.get("clusterIpv4Cidr"),
ClusterZone=cluster.get("zone"),
ClusterLocation=cluster.get("location"),
ClusterEndpoint=cluster.get("endpoint"),
ClusterInitialVersion=cluster.get("initialClusterVersion"),
ClusterMasterVersion=cluster.get("currentMasterVersion"),
ClusterStatus=cluster.get("status"),
ClusterServicesIPv4Cidr=cluster.get("servicesIpv4Cidr"),
ClusterDatabaseEncryption=cluster.get("databaseEncryption", {}).get(
"state"
),
ClusterNetworkPolicy=_process_network_policy(cluster),
ClusterMasterAuthorizedNetworks=cluster.get(
"masterAuthorizedNetworksConfig", {}
).get("enabled"),
ClusterAbac=cluster.get("legacyAbac", {}).get("enabled"),
ClusterShieldedNodes=cluster.get("shieldedNodes", {}).get("enabled"),
ClusterPrivateNodes=cluster.get("privateClusterConfig", {}).get(
"enablePrivateNodes"
),
ClusterPrivateEndpointEnabled=cluster.get("privateClusterConfig", {}).get(
"enablePrivateEndpoint"
),
ClusterPrivateEndpoint=cluster.get("privateClusterConfig", {}).get(
"privateEndpoint"
),
ClusterPublicEndpoint=cluster.get("privateClusterConfig", {}).get(
"publicEndpoint"
),
ClusterMasterUsername=cluster.get("masterAuth", {}).get("username"),
ClusterMasterPassword=cluster.get("masterAuth", {}).get("password"),
gcp_update_tag=gcp_update_tag,
)
|
def load_gke_clusters(neo4j_session, gke_list, project_id, gcp_update_tag):
"""
Ingest GCP GKE Clusters to Neo4j
:type neo4j_session: Neo4j session object
:param neo4j session: The Neo4j session object
:type gke_list: list
:param gke_list: List of GCP GKE Clusters to inject
:type gcp_update_tag: timestamp
:param gcp_update_tag: The timestamp value to set our new Neo4j nodes with
:rtype: NoneType
:return: Nothing
"""
query = """
MERGE(cluster:GKECluster{id:{ClusterSelfLink}})
ON CREATE SET
cluster.firstseen = timestamp(),
cluster.created_at = {ClusterCreateTime}
SET
cluster.name = {ClusterName},
cluster.self_link = {ClusterSelfLink},
cluster.description = {ClusterDescription},
cluster.logging_service = {ClusterLoggingService},
cluster.monitoring_service = {ClusterMonitoringService},
cluster.network = {ClusterNetwork},
cluster.subnetwork = {ClusterSubnetwork},
cluster.cluster_ipv4cidr = {ClusterIPv4Cidr},
cluster.zone = {ClusterZone},
cluster.location = {ClusterLocation},
cluster.endpoint = {ClusterEndpoint},
cluster.initial_version = {ClusterInitialVersion},
cluster.current_master_version = {ClusterMasterVersion},
cluster.status = {ClusterStatus},
cluster.services_ipv4cidr = {ClusterServicesIPv4Cidr},
cluster.database_encryption = {ClusterDatabaseEncryption},
cluster.network_policy = {ClusterNetworkPolicy},
cluster.master_authorized_networks = {ClusterMasterAuthorizedNetworks},
cluster.legacy_abac = {ClusterAbac},
cluster.shielded_nodes = {ClusterShieldedNodes},
cluster.private_nodes = {ClusterPrivateNodes},
cluster.private_endpoint_enabled = {ClusterPrivateEndpointEnabled},
cluster.private_endpoint = {ClusterPrivateEndpoint},
cluster.public_endpoint = {ClusterPublicEndpoint},
cluster.masterauth_username = {ClusterMasterUsername},
cluster.masterauth_password = {ClusterMasterPassword}
WITH cluster
MATCH (owner:GCPProject{id:{ProjectId}})
MERGE (owner)-[r:RESOURCE]->(cluster)
ON CREATE SET r.firstseen = timestamp()
SET r.lastupdated = {gcp_update_tag}
"""
for cluster in gke_list.get("clusters", []):
neo4j_session.run(
query,
ProjectId=project_id,
ClusterSelfLink=cluster["selfLink"],
ClusterCreateTime=cluster["createTime"],
ClusterName=cluster["name"],
ClusterDescription=cluster.get("description"),
ClusterLoggingService=cluster.get("loggingService"),
ClusterMonitoringService=cluster.get("monitoringService"),
ClusterNetwork=cluster.get("network"),
ClusterSubnetwork=cluster.get("subnetwork"),
ClusterIPv4Cidr=cluster.get("clusterIpv4Cidr"),
ClusterZone=cluster.get("zone"),
ClusterLocation=cluster.get("location"),
ClusterEndpoint=cluster.get("endpoint"),
ClusterInitialVersion=cluster.get("initialClusterVersion"),
ClusterMasterVersion=cluster.get("currentMasterVersion"),
ClusterStatus=cluster.get("status"),
ClusterServicesIPv4Cidr=cluster.get("servicesIpv4Cidr"),
ClusterDatabaseEncryption=cluster.get("databaseEncryption", {}).get(
"state"
),
ClusterNetworkPolicy=_process_network_policy(cluster),
ClusterMasterAuthorizedNetworks=cluster.get(
"masterAuthorizedNetworksConfig", {}
).get("enabled"),
ClusterAbac=cluster.get("legacyAbac", {}).get("enabled"),
ClusterShieldedNodes=cluster.get("shieldedNodes", {}).get("enabled"),
ClusterPrivateNodes=cluster.get("privateClusterConfig", {}).get(
"enablePrivateNodes"
),
ClusterPrivateEndpointEnabled=cluster.get("privateClusterConfig", {}).get(
"enablePrivateEndpoint"
),
ClusterPrivateEndpoint=cluster.get("privateClusterConfig", {}).get(
"privateEndpoint"
),
ClusterPublicEndpoint=cluster.get("privateClusterConfig", {}).get(
"publicEndpoint"
),
ClusterMasterUsername=cluster.get("masterAuth", {}).get("username"),
ClusterMasterPassword=cluster.get("masterAuth", {}).get("password"),
gcp_update_tag=gcp_update_tag,
)
|
https://github.com/lyft/cartography/issues/377
|
Traceback (most recent call last):
File "{path}/site-packages/cartography/sync.py", line 71, in run
stage_func(neo4j_session, config)
File "{path}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{path}/site-packages/cartography/intel/gcp/__init__.py", line 218, in start_gcp_ingestion
_sync_multiple_projects(neo4j_session, resources, projects, config.update_tag, common_job_parameters)
File "{path}/site-packages/cartography/intel/gcp/__init__.py", line 177, in _sync_multiple_projects
_sync_single_project(neo4j_session, resources, project_id, gcp_update_tag, common_job_parameters)
File "{path}/site-packages/cartography/intel/gcp/__init__.py", line 155, in _sync_single_project
gke.sync_gke_clusters(neo4j_session, resources.container, project_id, gcp_update_tag, common_job_parameters)
File "{path}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{path}/site-packages/cartography/intel/gcp/gke.py", line 197, in sync_gke_clusters
gke_res = get_gke_clusters(container, project_id)
File "{path}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{path}/site-packages/cartography/intel/gcp/gke.py", line 28, in get_gke_clusters
res = req.execute()
File "{path}/site-packages/googleapiclient/_helpers.py", line 134, in positional_wrapper
return wrapped(*args, **kwargs)
File "{path}/site-packages/googleapiclient/http.py", line 907, in execute
raise HttpError(resp, content, uri=self.uri)
googleapiclient.errors.HttpError: <HttpError 403 when requesting https://container.googleapis.com/v1/projects/{ProjectName}}/zones/-/clusters?alt=json returned "Required "container.clusters.list" permission(s) for "projects/{ProjectName}".">
|
googleapiclient.errors.HttpError
|
def sync_gke_clusters(
neo4j_session, container, project_id, gcp_update_tag, common_job_parameters
):
"""
Get GCP GKE Clusters using the Container resource object, ingest to Neo4j, and clean up old data.
:type neo4j_session: The Neo4j session object
:param neo4j_session: The Neo4j session
:type container: The Container resource object created by googleapiclient.discovery.build()
:param container: The GCP Container resource object
:type project_id: str
:param project_id: The project ID of the corresponding project
:type gcp_update_tag: timestamp
:param gcp_update_tag: The timestamp value to set our new Neo4j nodes with
:type common_job_parameters: dict
:param common_job_parameters: Dictionary of other job parameters to pass to Neo4j
:rtype: NoneType
:return: Nothing
"""
logger.info("Syncing Compute objects for project %s.", project_id)
gke_res = get_gke_clusters(container, project_id)
load_gke_clusters(neo4j_session, gke_res, project_id, gcp_update_tag)
# TODO scope the cleanup to the current project - https://github.com/lyft/cartography/issues/381
cleanup_gke_clusters(neo4j_session, common_job_parameters)
|
def sync_gke_clusters(
neo4j_session, container, project_id, gcp_update_tag, common_job_parameters
):
"""
Get GCP GKE Clusters using the Container resource object, ingest to Neo4j, and clean up old data.
:type neo4j_session: The Neo4j session object
:param neo4j_session: The Neo4j session
:type container: The Container resource object created by googleapiclient.discovery.build()
:param container: The GCP Container resource object
:type project_id: str
:param project_id: The project ID of the corresponding project
:type gcp_update_tag: timestamp
:param gcp_update_tag: The timestamp value to set our new Neo4j nodes with
:type common_job_parameters: dict
:param common_job_parameters: Dictionary of other job parameters to pass to Neo4j
:rtype: NoneType
:return: Nothing
"""
logger.info("Syncing Compute objects for project %s.", project_id)
gke_res = get_gke_clusters(container, project_id)
load_gke_clusters(neo4j_session, gke_res, project_id, gcp_update_tag)
cleanup_gke_clusters(neo4j_session, common_job_parameters)
|
https://github.com/lyft/cartography/issues/377
|
Traceback (most recent call last):
File "{path}/site-packages/cartography/sync.py", line 71, in run
stage_func(neo4j_session, config)
File "{path}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{path}/site-packages/cartography/intel/gcp/__init__.py", line 218, in start_gcp_ingestion
_sync_multiple_projects(neo4j_session, resources, projects, config.update_tag, common_job_parameters)
File "{path}/site-packages/cartography/intel/gcp/__init__.py", line 177, in _sync_multiple_projects
_sync_single_project(neo4j_session, resources, project_id, gcp_update_tag, common_job_parameters)
File "{path}/site-packages/cartography/intel/gcp/__init__.py", line 155, in _sync_single_project
gke.sync_gke_clusters(neo4j_session, resources.container, project_id, gcp_update_tag, common_job_parameters)
File "{path}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{path}/site-packages/cartography/intel/gcp/gke.py", line 197, in sync_gke_clusters
gke_res = get_gke_clusters(container, project_id)
File "{path}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{path}/site-packages/cartography/intel/gcp/gke.py", line 28, in get_gke_clusters
res = req.execute()
File "{path}/site-packages/googleapiclient/_helpers.py", line 134, in positional_wrapper
return wrapped(*args, **kwargs)
File "{path}/site-packages/googleapiclient/http.py", line 907, in execute
raise HttpError(resp, content, uri=self.uri)
googleapiclient.errors.HttpError: <HttpError 403 when requesting https://container.googleapis.com/v1/projects/{ProjectName}}/zones/-/clusters?alt=json returned "Required "container.clusters.list" permission(s) for "projects/{ProjectName}".">
|
googleapiclient.errors.HttpError
|
def sync_gcp_buckets(
neo4j_session, storage, project_id, gcp_update_tag, common_job_parameters
):
"""
Get GCP instances using the Storage resource object, ingest to Neo4j, and clean up old data.
:type neo4j_session: The Neo4j session object
:param neo4j_session: The Neo4j session
:type storage: The storage resource object created by googleapiclient.discovery.build()
:param storage: The GCP Storage resource object
:type project_id: str
:param project_id: The project ID of the corresponding project
:type gcp_update_tag: timestamp
:param gcp_update_tag: The timestamp value to set our new Neo4j nodes with
:type common_job_parameters: dict
:param common_job_parameters: Dictionary of other job parameters to pass to Neo4j
:rtype: NoneType
:return: Nothing
"""
logger.info("Syncing Storage objects for project %s.", project_id)
storage_res = get_gcp_buckets(storage, project_id)
bucket_list = transform_gcp_buckets(storage_res)
load_gcp_buckets(neo4j_session, bucket_list, gcp_update_tag)
# TODO scope the cleanup to the current project - https://github.com/lyft/cartography/issues/381
cleanup_gcp_buckets(neo4j_session, common_job_parameters)
|
def sync_gcp_buckets(
neo4j_session, storage, project_id, gcp_update_tag, common_job_parameters
):
"""
Get GCP instances using the Storage resource object, ingest to Neo4j, and clean up old data.
:type neo4j_session: The Neo4j session object
:param neo4j_session: The Neo4j session
:type storage: The storage resource object created by googleapiclient.discovery.build()
:param storage: The GCP Storage resource object
:type project_id: str
:param project_id: The project ID of the corresponding project
:type gcp_update_tag: timestamp
:param gcp_update_tag: The timestamp value to set our new Neo4j nodes with
:type common_job_parameters: dict
:param common_job_parameters: Dictionary of other job parameters to pass to Neo4j
:rtype: NoneType
:return: Nothing
"""
logger.info("Syncing Storage objects for project %s.", project_id)
storage_res = get_gcp_buckets(storage, project_id)
bucket_list = transform_gcp_buckets(storage_res)
load_gcp_buckets(neo4j_session, bucket_list, gcp_update_tag)
cleanup_gcp_buckets(neo4j_session, common_job_parameters)
|
https://github.com/lyft/cartography/issues/377
|
Traceback (most recent call last):
File "{path}/site-packages/cartography/sync.py", line 71, in run
stage_func(neo4j_session, config)
File "{path}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{path}/site-packages/cartography/intel/gcp/__init__.py", line 218, in start_gcp_ingestion
_sync_multiple_projects(neo4j_session, resources, projects, config.update_tag, common_job_parameters)
File "{path}/site-packages/cartography/intel/gcp/__init__.py", line 177, in _sync_multiple_projects
_sync_single_project(neo4j_session, resources, project_id, gcp_update_tag, common_job_parameters)
File "{path}/site-packages/cartography/intel/gcp/__init__.py", line 155, in _sync_single_project
gke.sync_gke_clusters(neo4j_session, resources.container, project_id, gcp_update_tag, common_job_parameters)
File "{path}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{path}/site-packages/cartography/intel/gcp/gke.py", line 197, in sync_gke_clusters
gke_res = get_gke_clusters(container, project_id)
File "{path}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{path}/site-packages/cartography/intel/gcp/gke.py", line 28, in get_gke_clusters
res = req.execute()
File "{path}/site-packages/googleapiclient/_helpers.py", line 134, in positional_wrapper
return wrapped(*args, **kwargs)
File "{path}/site-packages/googleapiclient/http.py", line 907, in execute
raise HttpError(resp, content, uri=self.uri)
googleapiclient.errors.HttpError: <HttpError 403 when requesting https://container.googleapis.com/v1/projects/{ProjectName}}/zones/-/clusters?alt=json returned "Required "container.clusters.list" permission(s) for "projects/{ProjectName}".">
|
googleapiclient.errors.HttpError
|
def get_transit_gateways(boto3_session, region):
client = boto3_session.client(
"ec2", region_name=region, config=get_botocore_config()
)
data = []
try:
data = client.describe_transit_gateways()["TransitGateways"]
except botocore.exceptions.ClientError as e:
# https://boto3.amazonaws.com/v1/documentation/api/latest/guide/error-handling.html#parsing-error-responses-and-catching-exceptions-from-aws-services
logger.warning(
"Could not retrieve Transit Gateways due to boto3 error %s: %s. Skipping.",
e.response["Error"]["Code"],
e.response["Error"]["Message"],
)
return data
|
def get_transit_gateways(boto3_session, region):
client = boto3_session.client(
"ec2", region_name=region, config=get_botocore_config()
)
return client.describe_transit_gateways()["TransitGateways"]
|
https://github.com/lyft/cartography/issues/375
|
Traceback (most recent call last):
[...]
File "{PATH}/site-packages/cartography/intel/aws/__init__.py", line 96, in _sync_multiple_accounts
_sync_one_account(neo4j_session, boto3_session, account_id, sync_tag, common_job_parameters)
File "{PATH}/site-packages/cartography/intel/aws/__init__.py", line 44, in _sync_one_account
ec2.sync(neo4j_session, boto3_session, regions, account_id, sync_tag, common_job_parameters)
File "{PATH}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{PATH}/site-packages/cartography/intel/aws/ec2/__init__.py", line 35, in sync
sync_transit_gateways(neo4j_session, boto3_session, regions, account_id, sync_tag, common_job_parameters)
File "{PATH}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{PATH}/site-packages/cartography/intel/aws/ec2/tgw.py", line 210, in sync_transit_gateways
tgws = get_transit_gateways(boto3_session, region)
File "{PATH}/site-packages/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{PATH}/site-packages/cartography/util.py", line 78, in inner_function
return func(*args, **kwargs)
File "{PATH}/site-packages/cartography/intel/aws/ec2/tgw.py", line 16, in get_transit_gateways
return client.describe_transit_gateways()["TransitGateways"]
File "{PATH}/site-packages/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "{PATH}/site-packages/botocore/client.py", line 661, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (InvalidAction) when calling the DescribeTransitGateways operation: The action DescribeTransitGateways is not valid for this web service.
|
botocore.exceptions.ClientError
|
def get_zones_in_project(project_id, compute, max_results=None):
"""
Return the zones where the Compute Engine API is enabled for the given project_id.
See https://cloud.google.com/compute/docs/reference/rest/v1/zones and
https://cloud.google.com/compute/docs/reference/rest/v1/zones/list.
If the API is not enabled or if the project returns a 404-not-found, return None.
:param project_id: The project ID number to sync. See the `projectId` field in
https://cloud.google.com/resource-manager/reference/rest/v1/projects
:param compute: The compute resource object created by googleapiclient.discovery.build()
:param max_results: Optional cap on number of results returned by this function. Default = None, which means no cap.
:return: List of a project's zone objects if Compute API is turned on, else None.
"""
try:
req = compute.zones().list(project=project_id, maxResults=max_results)
res = req.execute()
return res["items"]
except HttpError as e:
reason = _get_error_reason(e)
if reason == "accessNotConfigured":
logger.info(
(
"Google Compute Engine API access is not configured for project %s; skipping. "
"Full details: %s"
),
project_id,
e,
)
return None
elif reason == "notFound":
logger.info(
("Project %s returned a 404 not found error. Full details: %s"),
project_id,
e,
)
return None
elif reason == "forbidden":
logger.info(
(
"Your GCP identity does not have the compute.zones.list permission for project %s; skipping "
"compute sync for this project. Full details: %s"
),
project_id,
e,
)
return None
else:
raise
|
def get_zones_in_project(project_id, compute, max_results=None):
"""
Return the zones where the Compute Engine API is enabled for the given project_id.
See https://cloud.google.com/compute/docs/reference/rest/v1/zones and
https://cloud.google.com/compute/docs/reference/rest/v1/zones/list.
If the API is not enabled or if the project returns a 404-not-found, return None.
:param project_id: The project ID number to sync. See the `projectId` field in
https://cloud.google.com/resource-manager/reference/rest/v1/projects
:param compute: The compute resource object created by googleapiclient.discovery.build()
:param max_results: Optional cap on number of results returned by this function. Default = None, which means no cap.
:return: List of a project's zone objects if Compute API is turned on, else None.
"""
try:
req = compute.zones().list(project=project_id, maxResults=max_results)
res = req.execute()
return res["items"]
except HttpError as e:
reason = _get_error_reason(e)
if reason == "accessNotConfigured":
logger.debug(
(
"Google Compute Engine API access is not configured for project %s. "
"Full details: %s"
),
project_id,
e,
)
return None
elif reason == "notFound":
logger.debug(
("Project %s returned a 404 not found error. Full details: %s"),
project_id,
e,
)
return None
else:
raise
|
https://github.com/lyft/cartography/issues/371
|
Traceback (most recent call last):
File "{Path}/cartography/sync.py", line 145, in run_with_config
return sync.run(neo4j_driver, config)
File "{Path}/cartography/sync.py", line 71, in run
stage_func(neo4j_session, config)
File "{Path}/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{Path}/cartography/intel/gcp/__init__.py", line 218, in start_gcp_ingestion
_sync_multiple_projects(neo4j_session, resources, projects, config.update_tag, common_job_parameters)
File "{Path}/cartography/intel/gcp/__init__.py", line 177, in _sync_multiple_projects
_sync_single_project(neo4j_session, resources, project_id, gcp_update_tag, common_job_parameters)
File "{Path}/cartography/intel/gcp/__init__.py", line 151, in _sync_single_project
compute.sync(neo4j_session, resources.compute, project_id, gcp_update_tag, common_job_parameters)
File "{Path}/cartography/intel/gcp/compute.py", line 967, in sync
zones = get_zones_in_project(project_id, compute)
File "{Path}/cartography/util.py", line 58, in timed
result = method(*args, **kwargs)
File "{Path}/cartography/intel/gcp/compute.py", line 55, in get_zones_in_project
res = req.execute()
File "{Path}/googleapiclient/_helpers.py", line 134, in positional_wrapper
return wrapped(*args, **kwargs)
File "{Path}/googleapiclient/http.py", line 907, in execute
raise HttpError(resp, content, uri=self.uri)
googleapiclient.errors.HttpError: <HttpError 403 when requesting https://compute.googleapis.com/compute/v1/projects/{ProjectName}/zones?alt=json returned "Required 'compute.zones.list' permission for 'projects/{ProjectName}'">
|
googleapiclient.errors.HttpError
|
def get_user_managed_policy_data(boto3_session, user_list):
resource_client = boto3_session.resource("iam")
policies = {}
for user in user_list:
name = user["UserName"]
arn = user["Arn"]
resource_user = resource_client.User(name)
try:
policies[arn] = {
p.policy_name: p.default_version.document["Statement"]
for p in resource_user.attached_policies.all()
}
except resource_client.meta.client.exceptions.NoSuchEntityException:
logger.warning(
f"Could not get policies for user {name} due to NoSuchEntityException; skipping.",
)
return policies
|
def get_user_managed_policy_data(boto3_session, user_list):
resource_client = boto3_session.resource("iam")
policies = {}
for user in user_list:
name = user["UserName"]
arn = user["Arn"]
resource_user = resource_client.User(name)
policies[arn] = {
p.policy_name: p.default_version.document["Statement"]
for p in resource_user.attached_policies.all()
}
return policies
|
https://github.com/lyft/cartography/issues/328
|
Traceback (most recent call last):
File "/usr/local/bin/cartography", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.7/site-packages/cartography/cli.py", line 308, in main
return CLI(default_sync, prog='cartography').main(argv)
File "/usr/local/lib/python3.7/site-packages/cartography/cli.py", line 288, in main
return cartography.sync.run_with_config(self.sync, config)
File "/usr/local/lib/python3.7/site-packages/cartography/sync.py", line 145, in run_with_config
return sync.run(neo4j_driver, config)
File "/usr/local/lib/python3.7/site-packages/cartography/sync.py", line 71, in run
stage_func(neo4j_session, config)
File "/usr/local/lib/python3.7/site-packages/cartography/util.py", line 58, in timed
return method(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/cartography/intel/aws/__init__.py", line 144, in start_aws_ingestion
_sync_multiple_accounts(neo4j_session, aws_accounts, config.update_tag, common_job_parameters)
File "/usr/local/lib/python3.7/site-packages/cartography/intel/aws/__init__.py", line 92, in _sync_multiple_accounts
_sync_one_account(neo4j_session, boto3_session, account_id, sync_tag, common_job_parameters)
File "/usr/local/lib/python3.7/site-packages/cartography/intel/aws/__init__.py", line 25, in _sync_one_account
iam.sync(neo4j_session, boto3_session, account_id, sync_tag, common_job_parameters)
File "/usr/local/lib/python3.7/site-packages/cartography/util.py", line 58, in timed
return method(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/cartography/intel/aws/iam.py", line 623, in sync
sync_users(neo4j_session, boto3_session, account_id, update_tag, common_job_parameters)
File "/usr/local/lib/python3.7/site-packages/cartography/util.py", line 58, in timed
return method(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/cartography/intel/aws/iam.py", line 515, in sync_users
sync_user_inline_policies(boto3_session, data, neo4j_session, aws_update_tag)
File "/usr/local/lib/python3.7/site-packages/cartography/util.py", line 58, in timed
return method(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/cartography/intel/aws/iam.py", line 531, in sync_user_inline_policies
policy_data = get_user_policy_data(boto3_session, data['Users'])
File "/usr/local/lib/python3.7/site-packages/cartography/util.py", line 58, in timed
return method(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/cartography/intel/aws/iam.py", line 83, in get_user_policy_data
policies[arn] = {p.name: p.policy_document["Statement"] for p in resource_user.policies.all()}
File "/usr/local/lib/python3.7/site-packages/cartography/intel/aws/iam.py", line 83, in <dictcomp>
policies[arn] = {p.name: p.policy_document["Statement"] for p in resource_user.policies.all()}
File "/usr/local/lib/python3.7/site-packages/boto3/resources/collection.py", line 83, in __iter__
for page in self.pages():
File "/usr/local/lib/python3.7/site-packages/boto3/resources/collection.py", line 166, in pages
for page in pages:
File "/usr/local/lib/python3.7/site-packages/botocore/paginate.py", line 255, in __iter__
response = self._make_request(current_kwargs)
File "/usr/local/lib/python3.7/site-packages/botocore/paginate.py", line 332, in _make_request
return self._method(**current_kwargs)
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 316, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 635, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.errorfactory.NoSuchEntityException: An error occurred (NoSuchEntity) when calling the ListUserPolicies operation: The user with name <SANITIZED_USERNAME> cannot be found.
|
botocore.errorfactory.NoSuchEntityException
|
def get_user_policy_data(boto3_session, user_list):
resource_client = boto3_session.resource("iam")
policies = {}
for user in user_list:
name = user["UserName"]
arn = user["Arn"]
resource_user = resource_client.User(name)
try:
policies[arn] = {
p.name: p.policy_document["Statement"]
for p in resource_user.policies.all()
}
except resource_client.meta.client.exceptions.NoSuchEntityException:
logger.warning(
f"Could not get policies for user {name} due to NoSuchEntityException; skipping.",
)
return policies
|
def get_user_policy_data(boto3_session, user_list):
resource_client = boto3_session.resource("iam")
policies = {}
for user in user_list:
name = user["UserName"]
arn = user["Arn"]
resource_user = resource_client.User(name)
policies[arn] = {
p.name: p.policy_document["Statement"] for p in resource_user.policies.all()
}
return policies
|
https://github.com/lyft/cartography/issues/328
|
Traceback (most recent call last):
File "/usr/local/bin/cartography", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.7/site-packages/cartography/cli.py", line 308, in main
return CLI(default_sync, prog='cartography').main(argv)
File "/usr/local/lib/python3.7/site-packages/cartography/cli.py", line 288, in main
return cartography.sync.run_with_config(self.sync, config)
File "/usr/local/lib/python3.7/site-packages/cartography/sync.py", line 145, in run_with_config
return sync.run(neo4j_driver, config)
File "/usr/local/lib/python3.7/site-packages/cartography/sync.py", line 71, in run
stage_func(neo4j_session, config)
File "/usr/local/lib/python3.7/site-packages/cartography/util.py", line 58, in timed
return method(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/cartography/intel/aws/__init__.py", line 144, in start_aws_ingestion
_sync_multiple_accounts(neo4j_session, aws_accounts, config.update_tag, common_job_parameters)
File "/usr/local/lib/python3.7/site-packages/cartography/intel/aws/__init__.py", line 92, in _sync_multiple_accounts
_sync_one_account(neo4j_session, boto3_session, account_id, sync_tag, common_job_parameters)
File "/usr/local/lib/python3.7/site-packages/cartography/intel/aws/__init__.py", line 25, in _sync_one_account
iam.sync(neo4j_session, boto3_session, account_id, sync_tag, common_job_parameters)
File "/usr/local/lib/python3.7/site-packages/cartography/util.py", line 58, in timed
return method(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/cartography/intel/aws/iam.py", line 623, in sync
sync_users(neo4j_session, boto3_session, account_id, update_tag, common_job_parameters)
File "/usr/local/lib/python3.7/site-packages/cartography/util.py", line 58, in timed
return method(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/cartography/intel/aws/iam.py", line 515, in sync_users
sync_user_inline_policies(boto3_session, data, neo4j_session, aws_update_tag)
File "/usr/local/lib/python3.7/site-packages/cartography/util.py", line 58, in timed
return method(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/cartography/intel/aws/iam.py", line 531, in sync_user_inline_policies
policy_data = get_user_policy_data(boto3_session, data['Users'])
File "/usr/local/lib/python3.7/site-packages/cartography/util.py", line 58, in timed
return method(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/cartography/intel/aws/iam.py", line 83, in get_user_policy_data
policies[arn] = {p.name: p.policy_document["Statement"] for p in resource_user.policies.all()}
File "/usr/local/lib/python3.7/site-packages/cartography/intel/aws/iam.py", line 83, in <dictcomp>
policies[arn] = {p.name: p.policy_document["Statement"] for p in resource_user.policies.all()}
File "/usr/local/lib/python3.7/site-packages/boto3/resources/collection.py", line 83, in __iter__
for page in self.pages():
File "/usr/local/lib/python3.7/site-packages/boto3/resources/collection.py", line 166, in pages
for page in pages:
File "/usr/local/lib/python3.7/site-packages/botocore/paginate.py", line 255, in __iter__
response = self._make_request(current_kwargs)
File "/usr/local/lib/python3.7/site-packages/botocore/paginate.py", line 332, in _make_request
return self._method(**current_kwargs)
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 316, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 635, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.errorfactory.NoSuchEntityException: An error occurred (NoSuchEntity) when calling the ListUserPolicies operation: The user with name <SANITIZED_USERNAME> cannot be found.
|
botocore.errorfactory.NoSuchEntityException
|
def _attach_firewall_rules(neo4j_session, fw, gcp_update_tag):
"""
Attach the allow_rules to the Firewall object
:param neo4j_session: The Neo4j session
:param fw: The Firewall object
:param gcp_update_tag: The timestamp
:return: Nothing
"""
template = Template("""
MATCH (fw:GCPFirewall{id:{FwPartialUri}})
MERGE (rule:IpRule:IpPermissionInbound:GCPIpRule{id:{RuleId}})
ON CREATE SET rule.firstseen = timestamp(),
rule.ruleid = {RuleId}
SET rule.protocol = {Protocol},
rule.fromport = {FromPort},
rule.toport = {ToPort},
rule.lastupdated = {gcp_update_tag}
MERGE (rng:IpRange{id:{Range}})
ON CREATE SET rng.firstseen = timestamp(),
rng.range = {Range}
SET rng.lastupdated = {gcp_update_tag}
MERGE (rng)-[m:MEMBER_OF_IP_RULE]->(rule)
ON CREATE SET m.firstseen = timestamp()
SET m.lastupdated = {gcp_update_tag}
MERGE (fw)<-[r:$fw_rule_relationship_label]-(rule)
ON CREATE SET r.firstseen = timestamp()
SET r.lastupdated = {gcp_update_tag}
""")
for list_type in "transformed_allow_list", "transformed_deny_list":
if list_type == "transformed_allow_list":
label = "ALLOWED_BY"
else:
label = "DENIED_BY"
for rule in fw[list_type]:
# It is possible for sourceRanges to not be specified for this rule
# If sourceRanges is not specified then the rule must specify sourceTags.
# Since an IP range cannot have a tag applied to it, it is ok if we don't ingest this rule.
for ip_range in fw.get("sourceRanges", []):
neo4j_session.run(
template.safe_substitute(fw_rule_relationship_label=label),
FwPartialUri=fw["id"],
RuleId=rule["ruleid"],
Protocol=rule["protocol"],
FromPort=rule.get("fromport"),
ToPort=rule.get("toport"),
Range=ip_range,
gcp_update_tag=gcp_update_tag,
)
|
def _attach_firewall_rules(neo4j_session, fw, gcp_update_tag):
"""
Attach the allow_rules to the Firewall object
:param neo4j_session: The Neo4j session
:param fw: The Firewall object
:param gcp_update_tag: The timestamp
:return: Nothing
"""
query = """
MATCH (fw:GCPFirewall{id:{FwPartialUri}})
MERGE (rule:IpRule:IpPermissionInbound:GCPIpRule{id:{RuleId}})
ON CREATE SET rule.firstseen = timestamp(),
rule.ruleid = {RuleId}
SET rule.protocol = {Protocol},
rule.fromport = {FromPort},
rule.toport = {ToPort},
rule.lastupdated = {gcp_update_tag}
MERGE (rng:IpRange{id:{Range}})
ON CREATE SET rng.firstseen = timestamp(),
rng.range = {Range}
SET rng.lastupdated = {gcp_update_tag}
MERGE (rng)-[m:MEMBER_OF_IP_RULE]->(rule)
ON CREATE SET m.firstseen = timestamp()
SET m.lastupdated = {gcp_update_tag}
"""
for list_type in "transformed_allow_list", "transformed_deny_list":
if list_type == "transformed_allow_list":
query += """
MERGE (fw)<-[r:ALLOWED_BY]-(rule)
ON CREATE SET r.firstseen = timestamp()
SET r.lastupdated = {gcp_update_tag}
"""
else:
query += """
MERGE (fw)<-[r:DENIED_BY]-(rule)
ON CREATE SET r.firstseen = timestamp()
SET r.lastupdated = {gcp_update_tag}
"""
for rule in fw[list_type]:
# It is possible for sourceRanges to not be specified for this rule
# If sourceRanges is not specified then the rule must specify sourceTags.
# Since an IP range cannot have a tag applied to it, it is ok if we don't ingest this rule.
for ip_range in fw.get("sourceRanges", []):
neo4j_session.run(
query,
FwPartialUri=fw["id"],
RuleId=rule["ruleid"],
Protocol=rule["protocol"],
FromPort=rule.get("fromport"),
ToPort=rule.get("toport"),
Range=ip_range,
gcp_update_tag=gcp_update_tag,
)
|
https://github.com/lyft/cartography/issues/183
|
...
INFO:cartography.sync:Finishing sync stage 'aws'
INFO:cartography.sync:Starting sync stage 'gcp'
INFO:oauth2client.transport:Attempting refresh to obtain initial access_token
INFO:oauth2client.client:Refreshing access_token
INFO:oauth2client.transport:Attempting refresh to obtain initial access_token
INFO:oauth2client.client:Refreshing access_token
INFO:cartography.intel.gcp:Syncing GCP project example-project.
INFO:cartography.intel.gcp.compute:Syncing Compute objects for project example-project.
INFO:oauth2client.transport:Attempting refresh to obtain initial access_token
INFO:oauth2client.client:Refreshing access_token
ERROR:cartography.graph.job:Unhandled error while executing statement in job 'cleanup GCP Instances': Variable `r` already declared (line 25, column 26 (offset: 799))
" MERGE (fw)<-[r:DENIED_BY]-(rule)"
^
ERROR:cartography.sync:Unhandled exception during sync stage 'gcp'
Traceback (most recent call last):
File "/usr/lib/python3.7/site-packages/cartography/sync.py", line 68, in run
stage_func(neo4j_session, config)
File "/usr/lib/python3.7/site-packages/cartography/intel/gcp/__init__.py", line 134, in start_gcp_ingestion
_sync_multiple_projects(neo4j_session, resources, projects, config.update_tag, common_job_parameters)
File "/usr/lib/python3.7/site-packages/cartography/intel/gcp/__init__.py", line 94, in _sync_multiple_projects
_sync_single_project(neo4j_session, resources, project_id, gcp_update_tag, common_job_parameters)
File "/usr/lib/python3.7/site-packages/cartography/intel/gcp/__init__.py", line 72, in _sync_single_project
compute.sync(neo4j_session, resources.compute, project_id, gcp_update_tag, common_job_parameters)
File "/usr/lib/python3.7/site-packages/cartography/intel/gcp/compute.py", line 948, in sync
sync_gcp_firewall_rules(neo4j_session, compute, project_id, gcp_update_tag, common_job_parameters)
File "/usr/lib/python3.7/site-packages/cartography/intel/gcp/compute.py", line 911, in sync_gcp_firewall_rules
cleanup_gcp_firewall_rules(neo4j_session, common_job_parameters)
File "/usr/lib/python3.7/site-packages/cartography/intel/gcp/compute.py", line 853, in cleanup_gcp_firewall_rules
run_cleanup_job('gcp_compute_firewall_cleanup.json', neo4j_session, common_job_parameters)
File "/usr/lib/python3.7/site-packages/cartography/util.py", line 29, in run_cleanup_job
common_job_parameters,
File "/usr/lib/python3.7/site-packages/cartography/graph/job.py", line 95, in run_from_json
job.run(neo4j_session)
File "/usr/lib/python3.7/site-packages/cartography/graph/job.py", line 45, in run
stm.run(neo4j_session)
File "/usr/lib/python3.7/site-packages/cartography/graph/statement.py", line 47, in run
self._run_iterative(session)
File "/usr/lib/python3.7/site-packages/cartography/graph/statement.py", line 81, in _run_iterative
for r in results:
File "/usr/lib/python3.7/site-packages/neo4j/__init__.py", line 948, in records
self._session.fetch()
File "/usr/lib/python3.7/site-packages/neo4j/__init__.py", line 523, in fetch
detail_count, _ = self._connection.fetch()
File "/usr/lib/python3.7/site-packages/neobolt/direct.py", line 422, in fetch
return self._fetch()
File "/usr/lib/python3.7/site-packages/neobolt/direct.py", line 464, in _fetch
response.on_failure(summary_metadata or {})
File "/usr/lib/python3.7/site-packages/neobolt/direct.py", line 759, in on_failure
raise CypherError.hydrate(**metadata)
neobolt.exceptions.CypherSyntaxError: Variable `r` already declared (line 25, column 26 (offset: 799))
" MERGE (fw)<-[r:DENIED_BY]-(rule)"
^
Traceback (most recent call last):
File "/usr/bin/cartography", line 11, in <module>
load_entry_point('cartography==0.10.0', 'console_scripts', 'cartography')()
File "/usr/lib/python3.7/site-packages/cartography/cli.py", line 182, in main
return CLI(default_sync, prog='cartography').main(argv)
File "/usr/lib/python3.7/site-packages/cartography/cli.py", line 162, in main
return cartography.sync.run_with_config(self.sync, config)
File "/usr/lib/python3.7/site-packages/cartography/sync.py", line 134, in run_with_config
return sync.run(neo4j_driver, config)
File "/usr/lib/python3.7/site-packages/cartography/sync.py", line 68, in run
stage_func(neo4j_session, config)
File "/usr/lib/python3.7/site-packages/cartography/intel/gcp/__init__.py", line 134, in start_gcp_ingestion
_sync_multiple_projects(neo4j_session, resources, projects, config.update_tag, common_job_parameters)
File "/usr/lib/python3.7/site-packages/cartography/intel/gcp/__init__.py", line 94, in _sync_multiple_projects
_sync_single_project(neo4j_session, resources, project_id, gcp_update_tag, common_job_parameters)
File "/usr/lib/python3.7/site-packages/cartography/intel/gcp/__init__.py", line 72, in _sync_single_project
compute.sync(neo4j_session, resources.compute, project_id, gcp_update_tag, common_job_parameters)
File "/usr/lib/python3.7/site-packages/cartography/intel/gcp/compute.py", line 948, in sync
sync_gcp_firewall_rules(neo4j_session, compute, project_id, gcp_update_tag, common_job_parameters)
File "/usr/lib/python3.7/site-packages/cartography/intel/gcp/compute.py", line 911, in sync_gcp_firewall_rules
cleanup_gcp_firewall_rules(neo4j_session, common_job_parameters)
File "/usr/lib/python3.7/site-packages/cartography/intel/gcp/compute.py", line 853, in cleanup_gcp_firewall_rules
run_cleanup_job('gcp_compute_firewall_cleanup.json', neo4j_session, common_job_parameters)
File "/usr/lib/python3.7/site-packages/cartography/util.py", line 29, in run_cleanup_job
common_job_parameters,
File "/usr/lib/python3.7/site-packages/cartography/graph/job.py", line 95, in run_from_json
job.run(neo4j_session)
File "/usr/lib/python3.7/site-packages/cartography/graph/job.py", line 45, in run
stm.run(neo4j_session)
File "/usr/lib/python3.7/site-packages/cartography/graph/statement.py", line 47, in run
self._run_iterative(session)
File "/usr/lib/python3.7/site-packages/cartography/graph/statement.py", line 81, in _run_iterative
for r in results:
File "/usr/lib/python3.7/site-packages/neo4j/__init__.py", line 948, in records
self._session.fetch()
File "/usr/lib/python3.7/site-packages/neo4j/__init__.py", line 523, in fetch
detail_count, _ = self._connection.fetch()
File "/usr/lib/python3.7/site-packages/neobolt/direct.py", line 422, in fetch
return self._fetch()
File "/usr/lib/python3.7/site-packages/neobolt/direct.py", line 464, in _fetch
response.on_failure(summary_metadata or {})
File "/usr/lib/python3.7/site-packages/neobolt/direct.py", line 759, in on_failure
raise CypherError.hydrate(**metadata)
neobolt.exceptions.CypherSyntaxError: Variable `r` already declared (line 25, column 26 (offset: 799))
" MERGE (fw)<-[r:DENIED_BY]-(rule)"
^
|
neobolt.exceptions.CypherSyntaxError
|
def start_okta_ingestion(neo4j_session, config):
"""
Starts the OKTA ingestion process
:param neo4j_session: The Neo4j session
:param config: A `cartography.config` object
:return: Nothing
"""
if not config.okta_api_key:
logger.warning(
"No valid Okta credentials could be found. Exiting Okta sync stage.",
)
return
logger.debug(f"Starting Okta sync on {config.okta_org_id}")
common_job_parameters = {
"UPDATE_TAG": config.update_tag,
"OKTA_ORG_ID": config.okta_org_id,
}
state = OktaSyncState()
organization.create_okta_organization(
neo4j_session, config.okta_org_id, config.update_tag
)
users.sync_okta_users(
neo4j_session, config.okta_org_id, config.update_tag, config.okta_api_key, state
)
groups.sync_okta_groups(
neo4j_session, config.okta_org_id, config.update_tag, config.okta_api_key, state
)
applications.sync_okta_applications(
neo4j_session, config.okta_org_id, config.update_tag, config.okta_api_key
)
factors.sync_users_factors(
neo4j_session, config.okta_org_id, config.update_tag, config.okta_api_key, state
)
origins.sync_trusted_origins(
neo4j_session, config.okta_org_id, config.update_tag, config.okta_api_key
)
awssaml.sync_okta_aws_saml(
neo4j_session, config.okta_saml_role_regex, config.update_tag
)
# need creds with permission
# soft fail as some won't be able to get such high priv token
# when we get the E0000006 error
# see https://developer.okta.com/docs/reference/error-codes/
try:
roles.sync_roles(
neo4j_session,
config.okta_org_id,
config.update_tag,
config.okta_api_key,
state,
)
except OktaError as okta_error:
logger.warning(f"Unable to pull admin roles got {okta_error}")
# Getting roles requires super admin which most won't be able to get easily
if okta_error.error_code == "E0000006":
logger.warning(
"Unable to sync admin roles - api token needs admin rights to pull admin roles data"
)
_cleanup_okta_organizations(neo4j_session, common_job_parameters)
|
def start_okta_ingestion(neo4j_session, config):
"""
Starts the OKTA ingestion process
:param neo4j_session: The Neo4j session
:param config: A `cartography.config` object
:return: Nothing
"""
logger.debug(f"Starting Okta sync on {config.okta_org_id}")
common_job_parameters = {
"UPDATE_TAG": config.update_tag,
"OKTA_ORG_ID": config.okta_org_id,
}
state = OktaSyncState()
organization.create_okta_organization(
neo4j_session, config.okta_org_id, config.update_tag
)
users.sync_okta_users(
neo4j_session, config.okta_org_id, config.update_tag, config.okta_api_key, state
)
groups.sync_okta_groups(
neo4j_session, config.okta_org_id, config.update_tag, config.okta_api_key, state
)
applications.sync_okta_applications(
neo4j_session, config.okta_org_id, config.update_tag, config.okta_api_key
)
factors.sync_users_factors(
neo4j_session, config.okta_org_id, config.update_tag, config.okta_api_key, state
)
origins.sync_trusted_origins(
neo4j_session, config.okta_org_id, config.update_tag, config.okta_api_key
)
awssaml.sync_okta_aws_saml(
neo4j_session, config.okta_saml_role_regex, config.update_tag
)
# need creds with permission
# soft fail as some won't be able to get such high priv token
# when we get the E0000006 error
# see https://developer.okta.com/docs/reference/error-codes/
try:
roles.sync_roles(
neo4j_session,
config.okta_org_id,
config.update_tag,
config.okta_api_key,
state,
)
except OktaError as okta_error:
logger.warning(f"Unable to pull admin roles got {okta_error}")
# Getting roles requires super admin which most won't be able to get easily
if okta_error.error_code == "E0000006":
logger.warning(
"Unable to sync admin roles - api token needs admin rights to pull admin roles data"
)
_cleanup_okta_organizations(neo4j_session, common_job_parameters)
|
https://github.com/lyft/cartography/issues/202
|
INFO:cartography.sync:Starting sync stage 'okta'
ERROR:cartography.sync:Unhandled exception during sync stage 'okta'
Traceback (most recent call last):
File "/home/ec2-user/venv/lib64/python3.6/site-packages/cartography/sync.py", line 69, in run
stage_func(neo4j_session, config)
File "/home/ec2-user/venv/lib64/python3.6/site-packages/cartography/intel/okta/__init__.py", line 48, in start_okta_ingestion
users.sync_okta_users(neo4j_session, config.okta_org_id, config.update_tag, config.okta_api_key, state)
AttributeError: 'Namespace' object has no attribute 'okta_api_key'
Traceback (most recent call last):
File "/home/ec2-user/venv/bin/cartography", line 11, in <module>
load_entry_point('cartography==0.13.0', 'console_scripts', 'cartography')()
File "/home/ec2-user/venv/lib64/python3.6/site-packages/cartography/cli.py", line 216, in main
return CLI(default_sync, prog='cartography').main(argv)
File "/home/ec2-user/venv/lib64/python3.6/site-packages/cartography/cli.py", line 196, in main
return cartography.sync.run_with_config(self.sync, config)
File "/home/ec2-user/venv/lib64/python3.6/site-packages/cartography/sync.py", line 135, in run_with_config
return sync.run(neo4j_driver, config)
File "/home/ec2-user/venv/lib64/python3.6/site-packages/cartography/sync.py", line 69, in run
stage_func(neo4j_session, config)
File "/home/ec2-user/venv/lib64/python3.6/site-packages/cartography/intel/okta/__init__.py", line 48, in start_okta_ingestion
users.sync_okta_users(neo4j_session, config.okta_org_id, config.update_tag, config.okta_api_key, state)
AttributeError: 'Namespace' object has no attribute 'okta_api_key'
|
AttributeError
|
def _sign_and_dump_metadata(metadata, args):
"""
<Purpose>
Internal method to sign link or layout metadata and dump it to disk.
<Arguments>
metadata:
Metablock object (contains Link or Layout object)
args:
see argparser
<Exceptions>
SystemExit(0) if signing is successful
SystemExit(2) if any exception occurs
"""
try:
if not args.append:
metadata.signatures = []
signature = None
# If the cli tool was called with `--gpg [KEYID ...]` `args.gpg` is
# a list (not None) and we will try to sign with gpg.
# If `--gpg-home` was not set, args.gpg_home is None and the signer tries
# to use the default gpg keyring.
if args.gpg is not None:
# If `--gpg` was passed without argument we sign with the default key
# Excluded so that coverage does not vary in different test environments
if len(args.gpg) == 0: # pragma: no cover
signature = metadata.sign_gpg(gpg_keyid=None, gpg_home=args.gpg_home)
# Otherwise we sign with each passed keyid
for keyid in args.gpg:
securesystemslib.formats.KEYID_SCHEMA.check_match(keyid)
signature = metadata.sign_gpg(gpg_keyid=keyid, gpg_home=args.gpg_home)
# Alternatively we iterate over passed private key paths `--key KEYPATH
# ...` load the corresponding key from disk and sign with it
elif args.key is not None: # pragma: no branch
if args.key_type is None:
args.key_type = [util.KEY_TYPE_RSA] * len(args.key)
if len(args.key_type) != len(args.key):
raise securesystemslib.exceptions.FormatError(
"number of key_types should match with the number of keys specified"
)
for idx, key_path in enumerate(args.key):
key = util.import_private_key_from_file(key_path, args.key_type[idx])
signature = metadata.sign(key)
# If `--output` was specified we store the signed link or layout metadata
# to that location no matter what
if args.output:
out_path = args.output
# Otherwise, in case of links, we build the filename using the link/step
# name and the keyid of the created signature (there is only one for links)
elif metadata.type_ == "link":
securesystemslib.formats.ANY_SIGNATURE_SCHEMA.check_match(signature)
keyid = signature["keyid"]
out_path = FILENAME_FORMAT.format(
step_name=metadata.signed.name, keyid=keyid
)
# In case of layouts we just override the input file.
elif metadata.type_ == "layout": # pragma: no branch
out_path = args.file
LOG.info("Dumping {0} to '{1}'...".format(metadata.type_, out_path))
metadata.dump(out_path)
sys.exit(0)
except Exception as e:
LOG.error("The following error occurred while signing: {}".format(e))
sys.exit(2)
|
def _sign_and_dump_metadata(metadata, args):
"""
<Purpose>
Internal method to sign link or layout metadata and dump it to disk.
<Arguments>
metadata:
Metablock object (contains Link or Layout object)
args:
see argparser
<Exceptions>
SystemExit(0) if signing is successful
SystemExit(2) if any exception occurs
"""
try:
if not args.append:
metadata.signatures = []
signature = None
# If the cli tool was called with `--gpg [KEYID ...]` `args.gpg` is
# a list (not None) and we will try to sign with gpg.
# If `--gpg-home` was not set, args.gpg_home is None and the signer tries
# to use the default gpg keyring.
if args.gpg is not None:
# If `--gpg` was passed without argument we sign with the default key
# Excluded so that coverage does not vary in different test environments
if len(args.gpg) == 0: # pragma: no cover
signature = metadata.sign_gpg(gpg_keyid=None, gpg_home=args.gpg_home)
# Otherwise we sign with each passed keyid
for keyid in args.gpg:
securesystemslib.formats.KEYID_SCHEMA.check_match(keyid)
signature = metadata.sign_gpg(gpg_keyid=keyid, gpg_home=args.gpg_home)
# Alternatively we iterate over passed private key paths `--key KEYPATH
# ...` load the corresponding key from disk and sign with it
elif args.key is not None: # pragma: no branch
if args.key_type is None:
args.key_type = [util.KEY_TYPE_RSA] * len(args.key)
if len(args.key_type) != len(args.key):
raise securesystemslib.exceptions.FormatError(
"number of key_types should match with the number of keys specified"
)
for idx, key_path in enumerate(args.key):
key = util.import_private_key_from_file(key_path, args.key_type[idx])
signature = metadata.sign(key)
# If `--output` was specified we store the signed link or layout metadata
# to that location no matter what
if args.output:
out_path = args.output
# Otherwise, in case of links, we build the filename using the link/step
# name and the keyid of the created signature (there is only one for links)
elif metadata.type_ == "link":
in_toto.formats.ANY_SIGNATURE_SCHEMA.check_match(signature)
keyid = signature["keyid"]
out_path = FILENAME_FORMAT.format(
step_name=metadata.signed.name, keyid=keyid
)
# In case of layouts we just override the input file.
elif metadata.type_ == "layout": # pragma: no branch
out_path = args.file
LOG.info("Dumping {0} to '{1}'...".format(metadata.type_, out_path))
metadata.dump(out_path)
sys.exit(0)
except Exception as e:
LOG.error("The following error occurred while signing: {}".format(e))
sys.exit(2)
|
https://github.com/in-toto/in-toto/issues/282
|
======================================================================
FAIL: test_in_toto_run_with_byproduct (tests.test_runlib.TestInTotoRun)
Successfully run, verify recorded byproduct.
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/lukpueh/in-toto/tests/test_runlib.py", line 495, in test_in_toto_run_with_byproduct
"Python" in link.signed.byproducts.get("stderr"))
AssertionError: False is not true
Stderr:
in_toto.runlib:501:INFO:Running 'test_step'...
in_toto.runlib:524:INFO:Running command 'python --version'...
in_toto.runlib:538:INFO:Creating link metadata...
|
AssertionError
|
def main():
"""Parse arguments, load link or layout metadata file and either sign
metadata file or verify its signatures."""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""
Provides command line interface to sign in-toto link or layout metadata or
verify its signatures, with options to:
* replace (default) or add signature(s):
+ layout metadata can be signed by multiple keys at once,
+ link metadata can only be signed by one key at a time.
* write signed metadata to a specified path. If no output path is specified,
+ layout metadata is written to the path of the input file,
+ link metadata is written to '<name>.<keyid prefix>.link'.
* verify signatures
Returns nonzero value on failure and zero otherwise.""",
)
parser.epilog = """
examples:
Append two signatures to 'unsigned.layout' file and write to 'root.layout'.
{prog} -f unsigned.layout -k priv_key1 priv_key2 -o root.layout -a
Replace signature in link file. And write to default filename, i.e.
'package.<priv_key's keyid prefix>.link'.
{prog} -f package.2f89b927.link -k priv_key
Verify layout signed with 3 keys.
{prog} -f root.layout -k pub_key0 pub_key1 pub_key2 --verify
Sign layout with default gpg key in default gpg keyring.
{prog} -f root.layout --gpg
Verify layout with a gpg key identified by keyid '...439F3C2'.
{prog} -f root.layout --verify \\
--gpg 3BF8135765A07E21BD12BF89A5627F6BF439F3C2
""".format(prog=parser.prog)
named_args = parser.add_argument_group("required named arguments")
named_args.add_argument(
"-f",
"--file",
type=str,
required=True,
metavar="<path>",
help=("Path to link or layout file to be signed or verified."),
)
parser.add_argument(
"-k",
"--key",
nargs="+",
metavar="<path>",
help=(
"Path(s) to PEM formatted key file(s), used to sign the passed link or"
" layout metadata or to verify its signatures."
),
)
parser.add_argument(
"-t",
"--key-type",
dest="key_type",
type=str,
choices=util.SUPPORTED_KEY_TYPES,
nargs="+",
help=(
"Specify the key-type of the keys specified by the '--key'"
" option. Number of values should be the same as the number of keys"
" specified by the '--key' option. If '--key-type' is not passed,"
' default key_type of all keys is assumed to be "rsa".'
),
)
parser.add_argument(
"-g",
"--gpg",
nargs="*",
metavar="<id>",
help=(
"GPG keyid used to sign the passed link or layout metadata or to verify"
" its signatures. If passed without keyid, the default GPG key is"
" used."
),
)
parser.add_argument(
"--gpg-home",
dest="gpg_home",
type=str,
metavar="<path>",
help=(
"Path to GPG keyring to load GPG key identified by '--gpg' option. If"
" '--gpg-home' is not passed, the default GPG keyring is used."
),
)
# Only when signing
parser.add_argument(
"-o",
"--output",
type=str,
metavar="<path>",
help=(
"Path to store metadata file to be signed. If not passed, layout"
" metadata is written to the path of the input file and link metadata is"
" written to '<step name>.<keyid prefix>.link'"
),
)
# Only when signing
parser.add_argument(
"-a",
"--append",
action="store_true",
help=(
"If passed, signatures are added rather than replacing existing"
" signatures. This option is only availabe for layout metdata."
),
)
parser.add_argument(
"--verify",
action="store_true",
help="Verify signature(s) of passed link or layout metadata.",
)
verbosity_args = parser.add_mutually_exclusive_group(required=False)
verbosity_args.add_argument(
"-v",
"--verbose",
dest="verbose",
help="Verbose execution.",
action="store_true",
)
verbosity_args.add_argument(
"-q", "--quiet", dest="quiet", help="Suppress all output.", action="store_true"
)
parser.add_argument(
"--version", action="version", version="{} {}".format(parser.prog, __version__)
)
args = parser.parse_args()
LOG.setLevelVerboseOrQuiet(args.verbose, args.quiet)
# Additional argparse sanitization
# NOTE: This tool is starting to have many inter-dependent argument
# restrictions. Maybe we should make it less sophisticated at some point.
if args.verify and (args.append or args.output):
parser.print_help()
parser.error(
"conflicting arguments: don't specify any of"
" 'append' or 'output' when verifying signatures"
)
# Regular signing and GPG signing are mutually exclusive
if (args.key is None) == (args.gpg is None):
parser.print_help()
parser.error(
"wrong arguments: specify either `--key PATH [PATH ...]`"
" or `--gpg [KEYID [KEYID ...]]`"
)
# For gpg verification we must specify a keyid (no default key is loaded)
if args.verify and args.gpg is not None and len(args.gpg) < 1:
parser.print_help()
parser.error(
"missing arguments: specify at least one keyid for GPG"
" signature verification (`--gpg KEYID ...`)"
)
metadata = _load_metadata(args.file)
# Specific command line argument restrictions if we deal with links
if metadata.type_ == "link":
# Above we check that it's either `--key ...` or `--gpg ...`
# Here we check that it is not more than one in each case when dealing
# with links
link_error_message = (
"Link metadata is associated with a"
" single functionary and is usually namespaced accordingly:"
" '<name>.<keyid>.link'."
)
if (args.key is not None and len(args.key) > 1) or (
args.gpg is not None and len(args.gpg) > 1
):
parser.print_help()
parser.error(
"too many arguments: {} Hence signing Link metadata"
" with multiple keys is not allowed.".format(link_error_message)
)
if args.append:
parser.print_help()
parser.error(
"wrong arguments: {}. Hence adding signatures to"
" existing signatures on Link metadata is not allowed.".format(
link_error_message
)
)
if args.verify:
_verify_metadata(metadata, args)
else:
_sign_and_dump_metadata(metadata, args)
|
def main():
"""Parse arguments, load link or layout metadata file and either sign
metadata file or verify its signatures."""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""
Provides command line interface to sign in-toto link or layout metadata or
verify its signatures, with options to:
* replace (default) or add signature(s):
+ layout metadata can be signed by multiple keys at once,
+ link metadata can only be signed by one key at a time.
* write signed metadata to a specified path. If no output path is specified,
+ layout metadata is written to the path of the input file,
+ link metadata is written to '<name>.<keyid prefix>.link'.
* verify signatures
Returns nonzero value on failure and zero otherwise.""",
)
parser.epilog = """
examples:
Append two signatures to 'unsigned.layout' file and write to 'root.layout'.
{prog} -f unsigned.layout -k priv_key1 priv_key2 -o root.layout -a
Replace signature in link file. And write to default filename, i.e.
'package.<priv_key's keyid prefix>.link'.
{prog} -f package.2f89b927.link -k priv_key
Verify layout signed with 3 keys.
{prog} -f root.layout -k pub_key0 pub_key1 pub_key2 --verify
Sign layout with default gpg key in default gpg keyring.
{prog} -f root.layout --gpg
Verify layout with a gpg key identified by keyid '...439F3C2'.
{prog} -f root.layout --verify \\
--gpg 3BF8135765A07E21BD12BF89A5627F6BF439F3C2
""".format(prog=parser.prog)
named_args = parser.add_argument_group("required named arguments")
named_args.add_argument(
"-f",
"--file",
type=str,
required=True,
metavar="<path>",
help=("Path to link or layout file to be signed or verified."),
)
parser.add_argument(
"-k",
"--key",
nargs="+",
metavar="<path>",
help=(
"Path(s) to PEM formatted key file(s), used to sign the passed link or"
" layout metadata or to verify its signatures."
),
)
parser.add_argument(
"-t",
"--key-type",
dest="key_type",
type=str,
choices=in_toto.util.SUPPORTED_KEY_TYPES,
nargs="+",
help=(
"Specify the key-type of the keys specified by the '--key'"
" option. Number of values should be the same as the number of keys"
" specified by the '--key' option. If '--key-type' is not passed,"
' default key_type of all keys is assumed to be "rsa".'
),
)
parser.add_argument(
"-g",
"--gpg",
nargs="*",
metavar="<id>",
help=(
"GPG keyid used to sign the passed link or layout metadata or to verify"
" its signatures. If passed without keyid, the default GPG key is"
" used."
),
)
parser.add_argument(
"--gpg-home",
dest="gpg_home",
type=str,
metavar="<path>",
help=(
"Path to GPG keyring to load GPG key identified by '--gpg' option. If"
" '--gpg-home' is not passed, the default GPG keyring is used."
),
)
# Only when signing
parser.add_argument(
"-o",
"--output",
type=str,
metavar="<path>",
help=(
"Path to store metadata file to be signed. If not passed, layout"
" metadata is written to the path of the input file and link metadata is"
" written to '<step name>.<keyid prefix>.link'"
),
)
# Only when signing
parser.add_argument(
"-a",
"--append",
action="store_true",
help=(
"If passed, signatures are added rather than replacing existing"
" signatures. This option is only availabe for layout metdata."
),
)
parser.add_argument(
"--verify",
action="store_true",
help="Verify signature(s) of passed link or layout metadata.",
)
verbosity_args = parser.add_mutually_exclusive_group(required=False)
verbosity_args.add_argument(
"-v",
"--verbose",
dest="verbose",
help="Verbose execution.",
action="store_true",
)
verbosity_args.add_argument(
"-q", "--quiet", dest="quiet", help="Suppress all output.", action="store_true"
)
parser.add_argument(
"--version", action="version", version="{} {}".format(parser.prog, __version__)
)
args = parser.parse_args()
LOG.setLevelVerboseOrQuiet(args.verbose, args.quiet)
# Additional argparse sanitization
# NOTE: This tool is starting to have many inter-dependent argument
# restrictions. Maybe we should make it less sophisticated at some point.
if args.verify and (args.append or args.output):
parser.print_help()
parser.error(
"conflicting arguments: don't specify any of"
" 'append' or 'output' when verifying signatures"
)
# Regular signing and GPG signing are mutually exclusive
if (args.key is None) == (args.gpg is None):
parser.print_help()
parser.error(
"wrong arguments: specify either `--key PATH [PATH ...]`"
" or `--gpg [KEYID [KEYID ...]]`"
)
# For gpg verification we must specify a keyid (no default key is loaded)
if args.verify and args.gpg is not None and len(args.gpg) < 1:
parser.print_help()
parser.error(
"missing arguments: specify at least one keyid for GPG"
" signature verification (`--gpg KEYID ...`)"
)
metadata = _load_metadata(args.file)
# Specific command line argument restrictions if we deal with links
if metadata.type_ == "link":
# Above we check that it's either `--key ...` or `--gpg ...`
# Here we check that it is not more than one in each case when dealing
# with links
link_error_message = (
"Link metadata is associated with a"
" single functionary and is usually namespaced accordingly:"
" '<name>.<keyid>.link'."
)
if (args.key is not None and len(args.key) > 1) or (
args.gpg is not None and len(args.gpg) > 1
):
parser.print_help()
parser.error(
"too many arguments: {} Hence signing Link metadata"
" with multiple keys is not allowed.".format(link_error_message)
)
if args.append:
parser.print_help()
parser.error(
"wrong arguments: {}. Hence adding signatures to"
" existing signatures on Link metadata is not allowed.".format(
link_error_message
)
)
if args.verify:
_verify_metadata(metadata, args)
else:
_sign_and_dump_metadata(metadata, args)
|
https://github.com/in-toto/in-toto/issues/282
|
======================================================================
FAIL: test_in_toto_run_with_byproduct (tests.test_runlib.TestInTotoRun)
Successfully run, verify recorded byproduct.
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/lukpueh/in-toto/tests/test_runlib.py", line 495, in test_in_toto_run_with_byproduct
"Python" in link.signed.byproducts.get("stderr"))
AssertionError: False is not true
Stderr:
in_toto.runlib:501:INFO:Running 'test_step'...
in_toto.runlib:524:INFO:Running command 'python --version'...
in_toto.runlib:538:INFO:Creating link metadata...
|
AssertionError
|
def add_functionary_key(self, key):
"""
<Purpose>
Add the passed functionary public key to the layout's dictionary of keys.
<Arguments>
key:
A functionary public key conformant with
securesystemslib.formats.ANY_PUBKEY_SCHEMA.
<Exceptions>
securesystemslib.exceptions.FormatError
If the passed key does not match
securesystemslib.formats.ANY_PUBKEY_SCHEMA.
<Returns>
The added functionary public key.
"""
securesystemslib.formats.ANY_PUBKEY_SCHEMA.check_match(key)
keyid = key["keyid"]
self.keys[keyid] = key
return key
|
def add_functionary_key(self, key):
"""
<Purpose>
Add the passed functionary public key to the layout's dictionary of keys.
<Arguments>
key:
A functionary public key conformant with
in_toto.formats.ANY_PUBKEY_SCHEMA.
<Exceptions>
securesystemslib.exceptions.FormatError
If the passed key does not match
in_toto.formats.ANY_PUBKEY_SCHEMA.
<Returns>
The added functionary public key.
"""
in_toto.formats.ANY_PUBKEY_SCHEMA.check_match(key)
keyid = key["keyid"]
self.keys[keyid] = key
return key
|
https://github.com/in-toto/in-toto/issues/282
|
======================================================================
FAIL: test_in_toto_run_with_byproduct (tests.test_runlib.TestInTotoRun)
Successfully run, verify recorded byproduct.
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/lukpueh/in-toto/tests/test_runlib.py", line 495, in test_in_toto_run_with_byproduct
"Python" in link.signed.byproducts.get("stderr"))
AssertionError: False is not true
Stderr:
in_toto.runlib:501:INFO:Running 'test_step'...
in_toto.runlib:524:INFO:Running command 'python --version'...
in_toto.runlib:538:INFO:Creating link metadata...
|
AssertionError
|
def add_functionary_key_from_gpg_keyid(self, gpg_keyid, gpg_home=None):
"""
<Purpose>
Load a functionary public key from the GPG keychain, located at the
passed GPG home path, identified by the passed GPG keyid, and add it to
the layout's dictionary of keys.
<Arguments>
gpg_keyid:
A GPG keyid.
gpg_home:
A path to the GPG keychain to load the key from. If not passed
the default GPG keychain is used.
<Exceptions>
securesystemslib.exceptions.FormatError
If the passed gpg keyid does not match
securesystemslib.formats.KEYID_SCHEMA.
If the gpg home path is passed and does not match
securesystemslib.formats.PATH_SCHEMA.
If the key loaded from the GPG keychain does not match
securesystemslib.formats.ANY_PUBKEY_SCHEMA.
<Returns>
The added functionary public key.
"""
securesystemslib.formats.KEYID_SCHEMA.check_match(gpg_keyid)
if gpg_home: # pragma: no branch
securesystemslib.formats.PATH_SCHEMA.check_match(gpg_home)
key = securesystemslib.gpg.functions.export_pubkey(gpg_keyid, homedir=gpg_home)
return self.add_functionary_key(key)
|
def add_functionary_key_from_gpg_keyid(self, gpg_keyid, gpg_home=None):
"""
<Purpose>
Load a functionary public key from the GPG keychain, located at the
passed GPG home path, identified by the passed GPG keyid, and add it to
the layout's dictionary of keys.
<Arguments>
gpg_keyid:
A GPG keyid.
gpg_home:
A path to the GPG keychain to load the key from. If not passed
the default GPG keychain is used.
<Exceptions>
securesystemslib.exceptions.FormatError
If the passed gpg keyid does not match
securesystemslib.formats.KEYID_SCHEMA.
If the gpg home path is passed and does not match
securesystemslib.formats.PATH_SCHEMA.
If the key loaded from the GPG keychain does not match
in_toto.formats.ANY_PUBKEY_SCHEMA.
<Returns>
The added functionary public key.
"""
securesystemslib.formats.KEYID_SCHEMA.check_match(gpg_keyid)
if gpg_home: # pragma: no branch
securesystemslib.formats.PATH_SCHEMA.check_match(gpg_home)
key = in_toto.gpg.functions.gpg_export_pubkey(gpg_keyid, homedir=gpg_home)
return self.add_functionary_key(key)
|
https://github.com/in-toto/in-toto/issues/282
|
======================================================================
FAIL: test_in_toto_run_with_byproduct (tests.test_runlib.TestInTotoRun)
Successfully run, verify recorded byproduct.
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/lukpueh/in-toto/tests/test_runlib.py", line 495, in test_in_toto_run_with_byproduct
"Python" in link.signed.byproducts.get("stderr"))
AssertionError: False is not true
Stderr:
in_toto.runlib:501:INFO:Running 'test_step'...
in_toto.runlib:524:INFO:Running command 'python --version'...
in_toto.runlib:538:INFO:Creating link metadata...
|
AssertionError
|
def add_functionary_keys_from_gpg_keyids(self, gpg_keyid_list, gpg_home=None):
"""
<Purpose>
Load functionary public keys from the GPG keychain, located at the
passed GPG home path, identified by the passed GPG keyids, and add it to
the layout's dictionary of keys.
<Arguments>
gpg_keyid_list:
A list of GPG keyids.
gpg_home:
A path to the GPG keychain to load the keys from. If not passed
the default GPG keychain is used.
<Exceptions>
securesystemslib.exceptions.FormatError
If any of the passed gpg keyids does not match
securesystemslib.formats.KEYID_SCHEMA.
If gpg home is passed and does not match
securesystemslib.formats.PATH_SCHEMA.
If any of the keys loaded from the GPG keychain does not
match securesystemslib.formats.ANY_PUBKEY_SCHEMA.
<Returns>
A dictionary of the added functionary public keys with the key's keyids
as dictionary keys and the keys as values.
"""
securesystemslib.formats.KEYIDS_SCHEMA.check_match(gpg_keyid_list)
key_dict = {}
for gpg_keyid in gpg_keyid_list:
key = self.add_functionary_key_from_gpg_keyid(gpg_keyid, gpg_home)
key_dict[key["keyid"]] = key
return key_dict
|
def add_functionary_keys_from_gpg_keyids(self, gpg_keyid_list, gpg_home=None):
"""
<Purpose>
Load functionary public keys from the GPG keychain, located at the
passed GPG home path, identified by the passed GPG keyids, and add it to
the layout's dictionary of keys.
<Arguments>
gpg_keyid_list:
A list of GPG keyids.
gpg_home:
A path to the GPG keychain to load the keys from. If not passed
the default GPG keychain is used.
<Exceptions>
securesystemslib.exceptions.FormatError
If any of the passed gpg keyids does not match
securesystemslib.formats.KEYID_SCHEMA.
If gpg home is passed and does not match
securesystemslib.formats.PATH_SCHEMA.
If any of the keys loaded from the GPG keychain does not
match in_toto.formats.ANY_PUBKEY_SCHEMA.
<Returns>
A dictionary of the added functionary public keys with the key's keyids
as dictionary keys and the keys as values.
"""
securesystemslib.formats.KEYIDS_SCHEMA.check_match(gpg_keyid_list)
key_dict = {}
for gpg_keyid in gpg_keyid_list:
key = self.add_functionary_key_from_gpg_keyid(gpg_keyid, gpg_home)
key_dict[key["keyid"]] = key
return key_dict
|
https://github.com/in-toto/in-toto/issues/282
|
======================================================================
FAIL: test_in_toto_run_with_byproduct (tests.test_runlib.TestInTotoRun)
Successfully run, verify recorded byproduct.
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/lukpueh/in-toto/tests/test_runlib.py", line 495, in test_in_toto_run_with_byproduct
"Python" in link.signed.byproducts.get("stderr"))
AssertionError: False is not true
Stderr:
in_toto.runlib:501:INFO:Running 'test_step'...
in_toto.runlib:524:INFO:Running command 'python --version'...
in_toto.runlib:538:INFO:Creating link metadata...
|
AssertionError
|
def _validate_keys(self):
"""Private method to ensure that the keys contained are right."""
securesystemslib.formats.ANY_PUBKEY_DICT_SCHEMA.check_match(self.keys)
|
def _validate_keys(self):
"""Private method to ensure that the keys contained are right."""
in_toto.formats.ANY_PUBKEY_DICT_SCHEMA.check_match(self.keys)
|
https://github.com/in-toto/in-toto/issues/282
|
======================================================================
FAIL: test_in_toto_run_with_byproduct (tests.test_runlib.TestInTotoRun)
Successfully run, verify recorded byproduct.
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/lukpueh/in-toto/tests/test_runlib.py", line 495, in test_in_toto_run_with_byproduct
"Python" in link.signed.byproducts.get("stderr"))
AssertionError: False is not true
Stderr:
in_toto.runlib:501:INFO:Running 'test_step'...
in_toto.runlib:524:INFO:Running command 'python --version'...
in_toto.runlib:538:INFO:Creating link metadata...
|
AssertionError
|
def sign_gpg(self, gpg_keyid=None, gpg_home=None):
"""
<Purpose>
Signs the utf-8 encoded canonical JSON bytes of the Link or Layout object
contained in `self.signed` using `gpg.functions.create_signature` and
appends the created signature to `self.signatures`.
<Arguments>
gpg_keyid: (optional)
A gpg keyid, if omitted the default signing key is used
gpg_home: (optional)
The path to the gpg keyring, if omitted the default gpg keyring
is used
<Exceptions>
securesystemslib.gpg.exceptions.CommandError:
If the gpg signing command returned a non-zero exit code, e.g.
because the key has expired.
<Returns>
The dictionary representation of the newly created signature.
"""
signature = securesystemslib.gpg.functions.create_signature(
self.signed.signable_bytes, gpg_keyid, gpg_home
)
self.signatures.append(signature)
return signature
|
def sign_gpg(self, gpg_keyid=None, gpg_home=None):
"""
<Purpose>
Signs the utf-8 encoded canonical JSON bytes of the Link or Layout object
contained in `self.signed` using `gpg.functions.gpg_sign_object` and
appends the created signature to `self.signatures`.
<Arguments>
gpg_keyid: (optional)
A gpg keyid, if omitted the default signing key is used
gpg_home: (optional)
The path to the gpg keyring, if omitted the default gpg keyring
is used
<Exceptions>
in_toto.gpg.exceptions.CommandError:
If the gpg signing command returned a non-zero exit code, e.g.
because the key has expired.
<Returns>
The dictionary representation of the newly created signature.
"""
signature = in_toto.gpg.functions.gpg_sign_object(
self.signed.signable_bytes, gpg_keyid, gpg_home
)
self.signatures.append(signature)
return signature
|
https://github.com/in-toto/in-toto/issues/282
|
======================================================================
FAIL: test_in_toto_run_with_byproduct (tests.test_runlib.TestInTotoRun)
Successfully run, verify recorded byproduct.
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/lukpueh/in-toto/tests/test_runlib.py", line 495, in test_in_toto_run_with_byproduct
"Python" in link.signed.byproducts.get("stderr"))
AssertionError: False is not true
Stderr:
in_toto.runlib:501:INFO:Running 'test_step'...
in_toto.runlib:524:INFO:Running command 'python --version'...
in_toto.runlib:538:INFO:Creating link metadata...
|
AssertionError
|
def verify_signature(self, verification_key):
"""
<Purpose>
Verifies the signature, found in `self.signatures`, corresponding to the
passed verification key, or in case of GPG one of its subkeys, identified
by the key's keyid, using the passed verification key and the utf-8
encoded canonical JSON bytes of the Link or Layout object, contained in
`self.signed`.
If the signature matches securesystemslib.formats.GPG_SIGNATURE_SCHEMA,
`securesystemslib.gpg.functions.verify_signature` is used,
if the signature matches `securesystemslib.formats.SIGNATURE_SCHEMA`
`securesystemslib.keys.verify_signature`.
Note: In case of securesystemslib we actually pass the dictionary
representation of the data to be verified and
`securesystemslib.keys.verify_signature` converts it to
canonical JSON utf-8 encoded bytes before verifying the signature.
<Arguments>
verification_key:
Verifying key in the format:
securesystemslib.formats.ANY_VERIFICATION_KEY_SCHEMA
<Exceptions>
FormatError
If the passed key is not conformant with
securesystemslib.formats.ANY_VERIFICATION_KEY_SCHEMA
SignatureVerificationError
If the Metablock does not carry a signature signed with the
private key corresponding to the passed verification key or one
of its subkeys
If the signature corresponding to the passed verification key or
one of its subkeys does not match securesystemslib's or
securesystemslib.gpg's signature schema.
If the signature to be verified is malformed or invalid.
securesystemslib.gpg.exceptions.KeyExpirationError:
if the passed verification key is an expired gpg key
<Returns>
None.
"""
securesystemslib.formats.ANY_VERIFICATION_KEY_SCHEMA.check_match(verification_key)
verification_keyid = verification_key["keyid"]
# Find a signature that corresponds to the keyid of the passed
# verification key or one of its subkeys
signature = None
for signature in self.signatures:
if signature["keyid"] == verification_keyid:
break
if signature["keyid"] in list(verification_key.get("subkeys", {}).keys()):
break
else:
raise SignatureVerificationError(
"No signature found for key '{}'".format(verification_keyid)
)
if securesystemslib.formats.GPG_SIGNATURE_SCHEMA.matches(signature):
valid = securesystemslib.gpg.functions.verify_signature(
signature, verification_key, self.signed.signable_bytes
)
elif securesystemslib.formats.SIGNATURE_SCHEMA.matches(signature):
valid = securesystemslib.keys.verify_signature(
verification_key, signature, self.signed.signable_bytes
)
else:
valid = False
if not valid:
raise SignatureVerificationError(
"Invalid signature for keyid '{}'".format(verification_keyid)
)
|
def verify_signature(self, verification_key):
"""
<Purpose>
Verifies the signature, found in `self.signatures`, corresponding to the
passed verification key, or in case of GPG one of its subkeys, identified
by the key's keyid, using the passed verification key and the utf-8
encoded canonical JSON bytes of the Link or Layout object, contained in
`self.signed`.
If the signature matches `in_toto.gpg.formats.SIGNATURE_SCHEMA`,
`in_toto.gpg.functions.gpg_verify_signature` is used for verification,
if the signature matches `securesystemslib.formats.SIGNATURE_SCHEMA`
`securesystemslib.keys.verify_signature` is used.
Note: In case of securesystemslib we actually pass the dictionary
representation of the data to be verified and
`securesystemslib.keys.verify_signature` converts it to
canonical JSON utf-8 encoded bytes before verifying the signature.
<Arguments>
verification_key:
Verifying key in the format:
in_toto.formats.ANY_VERIFICATION_KEY_SCHEMA
<Exceptions>
FormatError
If the passed key is not conformant with
`in_toto.formats.ANY_VERIFICATION_KEY_SCHEMA`
SignatureVerificationError
If the Metablock does not carry a signature signed with the
private key corresponding to the passed verification key or one
of its subkeys
If the signature corresponding to the passed verification key or
one of its subkeys does not match securesystemslib's or
in_toto.gpg's signature schema.
If the signature to be verified is malformed or invalid.
in_toto.gpg.exceptions.KeyExpirationError:
if the passed verification key is an expired gpg key
<Returns>
None.
"""
in_toto.formats.ANY_VERIFICATION_KEY_SCHEMA.check_match(verification_key)
verification_keyid = verification_key["keyid"]
# Find a signature that corresponds to the keyid of the passed
# verification key or one of its subkeys
signature = None
for signature in self.signatures:
if signature["keyid"] == verification_keyid:
break
if signature["keyid"] in list(verification_key.get("subkeys", {}).keys()):
break
else:
raise SignatureVerificationError(
"No signature found for key '{}'".format(verification_keyid)
)
if in_toto.gpg.formats.SIGNATURE_SCHEMA.matches(signature):
valid = in_toto.gpg.functions.gpg_verify_signature(
signature, verification_key, self.signed.signable_bytes
)
elif securesystemslib.formats.SIGNATURE_SCHEMA.matches(signature):
valid = securesystemslib.keys.verify_signature(
verification_key, signature, self.signed.signable_bytes
)
else:
valid = False
if not valid:
raise SignatureVerificationError(
"Invalid signature for keyid '{}'".format(verification_keyid)
)
|
https://github.com/in-toto/in-toto/issues/282
|
======================================================================
FAIL: test_in_toto_run_with_byproduct (tests.test_runlib.TestInTotoRun)
Successfully run, verify recorded byproduct.
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/lukpueh/in-toto/tests/test_runlib.py", line 495, in test_in_toto_run_with_byproduct
"Python" in link.signed.byproducts.get("stderr"))
AssertionError: False is not true
Stderr:
in_toto.runlib:501:INFO:Running 'test_step'...
in_toto.runlib:524:INFO:Running command 'python --version'...
in_toto.runlib:538:INFO:Creating link metadata...
|
AssertionError
|
def _validate_signatures(self):
"""Private method to check that the 'signatures' attribute is a list of
signatures in the format 'securesystemslib.formats.ANY_SIGNATURE_SCHEMA'.
"""
if not isinstance(self.signatures, list):
raise securesystemslib.exceptions.FormatError(
"The Metablock's 'signatures' property has to be of type 'list'."
)
for signature in self.signatures:
securesystemslib.formats.ANY_SIGNATURE_SCHEMA.check_match(signature)
|
def _validate_signatures(self):
"""Private method to check that the 'signatures' attribute is a list of
signatures in the format 'in_toto.formats.ANY_SIGNATURE_SCHEMA'."""
if not isinstance(self.signatures, list):
raise securesystemslib.exceptions.FormatError(
"The Metablock's 'signatures' property has to be of type 'list'."
)
for signature in self.signatures:
in_toto.formats.ANY_SIGNATURE_SCHEMA.check_match(signature)
|
https://github.com/in-toto/in-toto/issues/282
|
======================================================================
FAIL: test_in_toto_run_with_byproduct (tests.test_runlib.TestInTotoRun)
Successfully run, verify recorded byproduct.
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/lukpueh/in-toto/tests/test_runlib.py", line 495, in test_in_toto_run_with_byproduct
"Python" in link.signed.byproducts.get("stderr"))
AssertionError: False is not true
Stderr:
in_toto.runlib:501:INFO:Running 'test_step'...
in_toto.runlib:524:INFO:Running command 'python --version'...
in_toto.runlib:538:INFO:Creating link metadata...
|
AssertionError
|
def unpack_rule(rule):
"""
Parses the rule and extracts and returns the necessary data to apply the
rule. Can also be used to verify if a rule complies with any of the formats
<Arguments>
rule:
The list of rule elements, in one of the following formats:
MATCH <pattern> [IN <source-path-prefix>] WITH (MATERIALS|PRODUCTS)
[IN <destination-path-prefix>] FROM <step>,
CREATE <pattern>,
DELETE <pattern>,
MODIFY <pattern>,
ALLOW <pattern>,
DISALLOW <pattern>,
REQUIRE <file>
Note that REQUIRE is somewhat of a weird animal that does not use patterns
but rather single filenames (for now).
<Exceptions>
raises FormatError, if the rule does not comply with any of the formats.
<Side Effects>
None.
<Returns>
A dictionary of the artifact rule data,
if it is a generic rule the dictionary is:
{
"rule_type": rule[0] ("CREATE"|"MODIFY"|"DELETE"|"ALLOW"|"DISALLOW")
"pattern" : rule[1], a path pattern
}
if it is a match rule, the dictionary is:
{
"rule_type": rule[0], ("MATCH"),
"pattern": rule[1], a path pattern
"source_prefix": path or empty string
"dest_prefix": path or empty string
"dest_type" : destination artifact type, ("MATERIAL"|"PRODUCT")
"dest_name": destination step/inspection name
}
"""
securesystemslib.formats.LIST_OF_ANY_STRING_SCHEMA.check_match(rule)
# Create all lower rule copy to case insensitively parse out tokens whose
# position we don't know yet
# We keep the original rule to retain the non-token elements' case
rule_lower = []
for rule_elem in rule:
rule_lower.append(rule_elem.lower())
rule_len = len(rule)
if rule_len < 2 or rule_lower[0] not in ALL_RULES:
raise securesystemslib.exceptions.FormatError(
"Wrong rule format,"
" rules must start with one of '{0}' and specify a 'pattern' as"
" second element.\n"
"Got: \n\t'{1}'".format(", ".join(ALL_RULES), rule)
)
rule_type = rule_lower[0]
pattern = rule[1]
# Type is one of "CREATE", "MODIFY", "DELETE", "ALLOW", "DISALLOW"
if rule_type in GENERIC_RULES:
# pylint: disable=no-else-raise
if rule_len != 2:
raise securesystemslib.exceptions.FormatError(
"Wrong rule format,"
" generic rules must have one of the formats:\n\t"
"CREATE <pattern>\n\t"
"MODIFY <pattern>\n\t"
"DELETE <pattern>\n\t"
"ALLOW <pattern>\n\t"
"DISALLOW <pattern>\n"
"REQUIRE <file>\n"
"Got:\n\t{}".format(rule)
)
else:
return {
"rule_type": rule_type,
"pattern": pattern,
}
# Type is "MATCH"
# NOTE: Can't reach `else` branch, if the rule is neither in GENERIC_RULES
# nor in COMPLEX_RULES an exception is raised earlier.
elif rule_type in COMPLEX_RULES: # pragma: no branch
# ... IN <source-path-prefix> WITH (MATERIALS|PRODUCTS)
# IN <destination-path-prefix> FROM <step>
if (
rule_len == 10
and rule_lower[2] == "in"
and rule_lower[4] == "with"
and rule_lower[6] == "in"
and rule_lower[8] == "from"
):
source_prefix = rule[3]
dest_type = rule_lower[5]
dest_prefix = rule[7]
dest_name = rule[9]
# ... IN <source-path-prefix> WITH (MATERIALS|PRODUCTS) FROM <step>
elif (
rule_len == 8
and rule_lower[2] == "in"
and rule_lower[4] == "with"
and rule_lower[6] == "from"
):
source_prefix = rule[3]
dest_type = rule_lower[5]
dest_prefix = ""
dest_name = rule[7]
# ... WITH (MATERIALS|PRODUCTS) IN <destination-path-prefix> FROM <step>
elif (
rule_len == 8
and rule_lower[2] == "with"
and rule_lower[4] == "in"
and rule_lower[6] == "from"
):
source_prefix = ""
dest_type = rule_lower[3]
dest_prefix = rule[5]
dest_name = rule[7]
# ... WITH (MATERIALS|PRODUCTS) FROM <step>
elif rule_len == 6 and rule_lower[2] == "with" and rule_lower[4] == "from":
source_prefix = ""
dest_type = rule_lower[3]
dest_prefix = ""
dest_name = rule[5]
else:
raise securesystemslib.exceptions.FormatError(
"Wrong rule format,"
" match rules must have the format:\n\t"
" MATCH <pattern> [IN <source-path-prefix>] WITH"
" (MATERIALS|PRODUCTS) [IN <destination-path-prefix>] FROM <step>.\n"
"Got: \n\t{}".format(rule)
)
if dest_type not in {"materials", "products"}:
raise securesystemslib.exceptions.FormatError(
"Wrong rule format,"
" match rules must have either MATERIALS or PRODUCTS (case"
" insensitive) as destination.\n"
"Got: \n\t{}".format(rule)
)
return {
"rule_type": rule_type,
"pattern": pattern,
"source_prefix": source_prefix,
"dest_prefix": dest_prefix,
"dest_type": dest_type,
"dest_name": dest_name,
}
|
def unpack_rule(rule):
"""
Parses the rule and extracts and returns the necessary data to apply the
rule. Can also be used to verify if a rule complies with any of the formats
<Arguments>
rule:
The list of rule elements, in one of the following formats:
MATCH <pattern> [IN <source-path-prefix>] WITH (MATERIALS|PRODUCTS)
[IN <destination-path-prefix>] FROM <step>,
CREATE <pattern>,
DELETE <pattern>,
MODIFY <pattern>,
ALLOW <pattern>,
DISALLOW <pattern>,
REQUIRE <file>
Note that REQUIRE is somewhat of a weird animal that does not use patterns
but rather single filenames (for now).
<Exceptions>
raises FormatError, if the rule does not comply with any of the formats.
<Side Effects>
None.
<Returns>
A dictionary of the artifact rule data,
if it is a generic rule the dictionary is:
{
"rule_type": rule[0] ("CREATE"|"MODIFY"|"DELETE"|"ALLOW"|"DISALLOW")
"pattern" : rule[1], a path pattern
}
if it is a match rule, the dictionary is:
{
"rule_type": rule[0], ("MATCH"),
"pattern": rule[1], a path pattern
"source_prefix": path or empty string
"dest_prefix": path or empty string
"dest_type" : destination artifact type, ("MATERIAL"|"PRODUCT")
"dest_name": destination step/inspection name
}
"""
in_toto.formats.LIST_OF_ANY_STRING_SCHEMA.check_match(rule)
# Create all lower rule copy to case insensitively parse out tokens whose
# position we don't know yet
# We keep the original rule to retain the non-token elements' case
rule_lower = []
for rule_elem in rule:
rule_lower.append(rule_elem.lower())
rule_len = len(rule)
if rule_len < 2 or rule_lower[0] not in ALL_RULES:
raise securesystemslib.exceptions.FormatError(
"Wrong rule format,"
" rules must start with one of '{0}' and specify a 'pattern' as"
" second element.\n"
"Got: \n\t'{1}'".format(", ".join(ALL_RULES), rule)
)
rule_type = rule_lower[0]
pattern = rule[1]
# Type is one of "CREATE", "MODIFY", "DELETE", "ALLOW", "DISALLOW"
if rule_type in GENERIC_RULES:
# pylint: disable=no-else-raise
if rule_len != 2:
raise securesystemslib.exceptions.FormatError(
"Wrong rule format,"
" generic rules must have one of the formats:\n\t"
"CREATE <pattern>\n\t"
"MODIFY <pattern>\n\t"
"DELETE <pattern>\n\t"
"ALLOW <pattern>\n\t"
"DISALLOW <pattern>\n"
"REQUIRE <file>\n"
"Got:\n\t{}".format(rule)
)
else:
return {
"rule_type": rule_type,
"pattern": pattern,
}
# Type is "MATCH"
# NOTE: Can't reach `else` branch, if the rule is neither in GENERIC_RULES
# nor in COMPLEX_RULES an exception is raised earlier.
elif rule_type in COMPLEX_RULES: # pragma: no branch
# ... IN <source-path-prefix> WITH (MATERIALS|PRODUCTS)
# IN <destination-path-prefix> FROM <step>
if (
rule_len == 10
and rule_lower[2] == "in"
and rule_lower[4] == "with"
and rule_lower[6] == "in"
and rule_lower[8] == "from"
):
source_prefix = rule[3]
dest_type = rule_lower[5]
dest_prefix = rule[7]
dest_name = rule[9]
# ... IN <source-path-prefix> WITH (MATERIALS|PRODUCTS) FROM <step>
elif (
rule_len == 8
and rule_lower[2] == "in"
and rule_lower[4] == "with"
and rule_lower[6] == "from"
):
source_prefix = rule[3]
dest_type = rule_lower[5]
dest_prefix = ""
dest_name = rule[7]
# ... WITH (MATERIALS|PRODUCTS) IN <destination-path-prefix> FROM <step>
elif (
rule_len == 8
and rule_lower[2] == "with"
and rule_lower[4] == "in"
and rule_lower[6] == "from"
):
source_prefix = ""
dest_type = rule_lower[3]
dest_prefix = rule[5]
dest_name = rule[7]
# ... WITH (MATERIALS|PRODUCTS) FROM <step>
elif rule_len == 6 and rule_lower[2] == "with" and rule_lower[4] == "from":
source_prefix = ""
dest_type = rule_lower[3]
dest_prefix = ""
dest_name = rule[5]
else:
raise securesystemslib.exceptions.FormatError(
"Wrong rule format,"
" match rules must have the format:\n\t"
" MATCH <pattern> [IN <source-path-prefix>] WITH"
" (MATERIALS|PRODUCTS) [IN <destination-path-prefix>] FROM <step>.\n"
"Got: \n\t{}".format(rule)
)
if dest_type not in {"materials", "products"}:
raise securesystemslib.exceptions.FormatError(
"Wrong rule format,"
" match rules must have either MATERIALS or PRODUCTS (case"
" insensitive) as destination.\n"
"Got: \n\t{}".format(rule)
)
return {
"rule_type": rule_type,
"pattern": pattern,
"source_prefix": source_prefix,
"dest_prefix": dest_prefix,
"dest_type": dest_type,
"dest_name": dest_name,
}
|
https://github.com/in-toto/in-toto/issues/282
|
======================================================================
FAIL: test_in_toto_run_with_byproduct (tests.test_runlib.TestInTotoRun)
Successfully run, verify recorded byproduct.
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/lukpueh/in-toto/tests/test_runlib.py", line 495, in test_in_toto_run_with_byproduct
"Python" in link.signed.byproducts.get("stderr"))
AssertionError: False is not true
Stderr:
in_toto.runlib:501:INFO:Running 'test_step'...
in_toto.runlib:524:INFO:Running command 'python --version'...
in_toto.runlib:538:INFO:Creating link metadata...
|
AssertionError
|
def pack_rule(
rule_type,
pattern,
source_prefix=None,
dest_type=None,
dest_prefix=None,
dest_name=None,
):
"""
<Purpose>
Create an artifact rule in the passed arguments and return it as list
as it is stored in a step's or inspection's expected_material or
expected_product field.
<Arguments>
rule_type:
One of "MATCH", "CREATE", "DELETE", MODIFY, ALLOW, DISALLOW,
REQUIRE (case insensitive).
pattern:
A glob pattern to match artifact paths.
source_prefix: (only for "MATCH" rules)
A prefix for 'pattern' to match artifacts reported by the link
corresponding to the step that contains the rule.
dest_type: (only for "MATCH" rules)
One of "MATERIALS" or "PRODUCTS" (case insensitive) to specify
if materials or products of the link corresponding to the step
identified by 'dest_name' should be matched.
dest_prefix: (only for "MATCH" rules)
A prefix for 'pattern' to match artifacts reported by the link
corresponding to the step identified by 'dest_name'.
dest_name: (only for "MATCH" rules)
The name of the step whose corresponding link is used to match
artifacts.
<Exceptions>
securesystemslib.exceptions.FormatError
if any of the arguments is malformed.
<Returns>
One of the following rule formats in as a list
MATCH <pattern> [IN <source-path-prefix>] WITH (MATERIALS|PRODUCTS)
[IN <destination-path-prefix>] FROM <step>,
CREATE <pattern>,
DELETE <pattern>,
MODIFY <pattern>,
ALLOW <pattern>,
DISALLOW <pattern>,
REQUIRE <file>
Note that REQUIRE is somewhat of a weird animal that does not use patterns
but rather single filenames (for now).
"""
securesystemslib.formats.ANY_STRING_SCHEMA.check_match(rule_type)
securesystemslib.formats.ANY_STRING_SCHEMA.check_match(pattern)
if rule_type.lower() not in ALL_RULES:
raise securesystemslib.exceptions.FormatError(
"'{0}' is not a valid "
"'type'. Rule type must be one of: {1} (case insensitive).".format(
rule_type, ", ".join(ALL_RULES)
)
)
if rule_type.upper() == "MATCH":
if not securesystemslib.formats.ANY_STRING_SCHEMA.matches(dest_type) or not (
dest_type.lower() == "materials" or dest_type.lower() == "products"
):
raise securesystemslib.exceptions.FormatError(
"'{}' is not a valid"
" 'dest_type'. Rules of type 'MATCH' require a destination type of"
" either 'MATERIALS' or 'PRODUCTS' (case insensitive).".format(
dest_type
)
)
if not (
securesystemslib.formats.ANY_STRING_SCHEMA.matches(dest_name) and dest_name
):
raise securesystemslib.exceptions.FormatError(
"'{}' is not a valid"
" 'dest_name'. Rules of type 'MATCH' require a step name as a"
" destination name.".format(dest_name)
)
# Construct rule
rule = ["MATCH", pattern]
if source_prefix:
securesystemslib.formats.ANY_STRING_SCHEMA.check_match(source_prefix)
rule += ["IN", source_prefix]
rule += ["WITH", dest_type.upper()]
if dest_prefix:
securesystemslib.formats.ANY_STRING_SCHEMA.check_match(dest_prefix)
rule += ["IN", dest_prefix]
rule += ["FROM", dest_name]
else:
rule = [rule_type.upper(), pattern]
return rule
|
def pack_rule(
rule_type,
pattern,
source_prefix=None,
dest_type=None,
dest_prefix=None,
dest_name=None,
):
"""
<Purpose>
Create an artifact rule in the passed arguments and return it as list
as it is stored in a step's or inspection's expected_material or
expected_product field.
<Arguments>
rule_type:
One of "MATCH", "CREATE", "DELETE", MODIFY, ALLOW, DISALLOW,
REQUIRE (case insensitive).
pattern:
A glob pattern to match artifact paths.
source_prefix: (only for "MATCH" rules)
A prefix for 'pattern' to match artifacts reported by the link
corresponding to the step that contains the rule.
dest_type: (only for "MATCH" rules)
One of "MATERIALS" or "PRODUCTS" (case insensitive) to specify
if materials or products of the link corresponding to the step
identified by 'dest_name' should be matched.
dest_prefix: (only for "MATCH" rules)
A prefix for 'pattern' to match artifacts reported by the link
corresponding to the step identified by 'dest_name'.
dest_name: (only for "MATCH" rules)
The name of the step whose corresponding link is used to match
artifacts.
<Exceptions>
securesystemslib.exceptions.FormatError
if any of the arguments is malformed.
<Returns>
One of the following rule formats in as a list
MATCH <pattern> [IN <source-path-prefix>] WITH (MATERIALS|PRODUCTS)
[IN <destination-path-prefix>] FROM <step>,
CREATE <pattern>,
DELETE <pattern>,
MODIFY <pattern>,
ALLOW <pattern>,
DISALLOW <pattern>,
REQUIRE <file>
Note that REQUIRE is somewhat of a weird animal that does not use patterns
but rather single filenames (for now).
"""
in_toto.formats.ANY_STRING_SCHEMA.check_match(rule_type)
in_toto.formats.ANY_STRING_SCHEMA.check_match(pattern)
if rule_type.lower() not in ALL_RULES:
raise securesystemslib.exceptions.FormatError(
"'{0}' is not a valid "
"'type'. Rule type must be one of: {1} (case insensitive).".format(
rule_type, ", ".join(ALL_RULES)
)
)
if rule_type.upper() == "MATCH":
if not in_toto.formats.ANY_STRING_SCHEMA.matches(dest_type) or not (
dest_type.lower() == "materials" or dest_type.lower() == "products"
):
raise securesystemslib.exceptions.FormatError(
"'{}' is not a valid"
" 'dest_type'. Rules of type 'MATCH' require a destination type of"
" either 'MATERIALS' or 'PRODUCTS' (case insensitive).".format(
dest_type
)
)
if not (in_toto.formats.ANY_STRING_SCHEMA.matches(dest_name) and dest_name):
raise securesystemslib.exceptions.FormatError(
"'{}' is not a valid"
" 'dest_name'. Rules of type 'MATCH' require a step name as a"
" destination name.".format(dest_name)
)
# Construct rule
rule = ["MATCH", pattern]
if source_prefix:
in_toto.formats.ANY_STRING_SCHEMA.check_match(source_prefix)
rule += ["IN", source_prefix]
rule += ["WITH", dest_type.upper()]
if dest_prefix:
in_toto.formats.ANY_STRING_SCHEMA.check_match(dest_prefix)
rule += ["IN", dest_prefix]
rule += ["FROM", dest_name]
else:
rule = [rule_type.upper(), pattern]
return rule
|
https://github.com/in-toto/in-toto/issues/282
|
======================================================================
FAIL: test_in_toto_run_with_byproduct (tests.test_runlib.TestInTotoRun)
Successfully run, verify recorded byproduct.
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/lukpueh/in-toto/tests/test_runlib.py", line 495, in test_in_toto_run_with_byproduct
"Python" in link.signed.byproducts.get("stderr"))
AssertionError: False is not true
Stderr:
in_toto.runlib:501:INFO:Running 'test_step'...
in_toto.runlib:524:INFO:Running command 'python --version'...
in_toto.runlib:538:INFO:Creating link metadata...
|
AssertionError
|
def execute_link(link_cmd_args, record_streams):
"""
<Purpose>
Executes the passed command plus arguments in a subprocess and returns
the return value of the executed command. If the specified standard output
and standard error of the command are recorded and also returned to the
caller.
<Arguments>
link_cmd_args:
A list where the first element is a command and the remaining
elements are arguments passed to that command.
record_streams:
A bool that specifies whether to redirect standard output and
and standard error to a temporary file which is returned to the
caller (True) or not (False).
<Exceptions>
TBA (see https://github.com/in-toto/in-toto/issues/6)
<Side Effects>
Executes passed command in a subprocess and redirects stdout and stderr
if specified.
<Returns>
- A dictionary containing standard output and standard error of the
executed command, called by-products.
Note: If record_streams is False, the dict values are empty strings.
- The return value of the executed command.
"""
if record_streams:
return_code, stdout_str, stderr_str = (
securesystemslib.process.run_duplicate_streams(link_cmd_args)
)
else:
process = securesystemslib.process.run(
link_cmd_args,
check=False,
stdout=securesystemslib.process.DEVNULL,
stderr=securesystemslib.process.DEVNULL,
)
stdout_str = stderr_str = ""
return_code = process.returncode
return {"stdout": stdout_str, "stderr": stderr_str, "return-value": return_code}
|
def execute_link(link_cmd_args, record_streams):
"""
<Purpose>
Executes the passed command plus arguments in a subprocess and returns
the return value of the executed command. If the specified standard output
and standard error of the command are recorded and also returned to the
caller.
<Arguments>
link_cmd_args:
A list where the first element is a command and the remaining
elements are arguments passed to that command.
record_streams:
A bool that specifies whether to redirect standard output and
and standard error to a temporary file which is returned to the
caller (True) or not (False).
<Exceptions>
TBA (see https://github.com/in-toto/in-toto/issues/6)
<Side Effects>
Executes passed command in a subprocess and redirects stdout and stderr
if specified.
<Returns>
- A dictionary containing standard output and standard error of the
executed command, called by-products.
Note: If record_streams is False, the dict values are empty strings.
- The return value of the executed command.
"""
if record_streams:
return_code, stdout_str, stderr_str = in_toto.process.run_duplicate_streams(
link_cmd_args
)
else:
process = in_toto.process.run(
link_cmd_args,
check=False,
stdout=in_toto.process.DEVNULL,
stderr=in_toto.process.DEVNULL,
)
stdout_str = stderr_str = ""
return_code = process.returncode
return {"stdout": stdout_str, "stderr": stderr_str, "return-value": return_code}
|
https://github.com/in-toto/in-toto/issues/282
|
======================================================================
FAIL: test_in_toto_run_with_byproduct (tests.test_runlib.TestInTotoRun)
Successfully run, verify recorded byproduct.
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/lukpueh/in-toto/tests/test_runlib.py", line 495, in test_in_toto_run_with_byproduct
"Python" in link.signed.byproducts.get("stderr"))
AssertionError: False is not true
Stderr:
in_toto.runlib:501:INFO:Running 'test_step'...
in_toto.runlib:524:INFO:Running command 'python --version'...
in_toto.runlib:538:INFO:Creating link metadata...
|
AssertionError
|
def in_toto_run(
name,
material_list,
product_list,
link_cmd_args,
record_streams=False,
signing_key=None,
gpg_keyid=None,
gpg_use_default=False,
gpg_home=None,
exclude_patterns=None,
base_path=None,
compact_json=False,
record_environment=False,
normalize_line_endings=False,
lstrip_paths=None,
):
"""
<Purpose>
Calls functions in this module to run the command passed as link_cmd_args
argument and to store materials, products, by-products and environment
information into a link metadata file.
The link metadata file is signed either with the passed signing_key, or
a gpg key identified by the passed gpg_keyid or with the default gpg
key if gpg_use_default is True.
Even if multiple key parameters are passed, only one key is used for
signing (in above order of precedence).
The link file is dumped to `link.FILENAME_FORMAT` using the signing key's
keyid.
If no key parameter is passed the link is neither signed nor dumped.
<Arguments>
name:
A unique name to relate link metadata with a step or inspection
defined in the layout.
material_list:
List of file or directory paths that should be recorded as
materials.
product_list:
List of file or directory paths that should be recorded as
products.
link_cmd_args:
A list where the first element is a command and the remaining
elements are arguments passed to that command.
record_streams: (optional)
A bool that specifies whether to redirect standard output and
and standard error to a temporary file which is returned to the
caller (True) or not (False).
signing_key: (optional)
If not None, link metadata is signed with this key.
Format is securesystemslib.formats.KEY_SCHEMA
gpg_keyid: (optional)
If not None, link metadata is signed with a gpg key identified
by the passed keyid.
gpg_use_default: (optional)
If True, link metadata is signed with default gpg key.
gpg_home: (optional)
Path to GPG keyring (if not set the default keyring is used).
exclude_patterns: (optional)
Artifacts matched by the pattern are excluded from the materials
and products sections in the resulting link.
base_path: (optional)
If passed, record artifacts relative to base_path. Default is
current working directory.
NOTE: The base_path part of the recorded material is not included
in the resulting preliminary link's material/product sections.
compact_json: (optional)
Whether or not to use the most compact json representation.
record_environment: (optional)
if values such as workdir should be recorded on the environment
dictionary (false by default)
normalize_line_endings: (optional)
If True, replaces windows and mac line endings with unix line
endings before hashing materials and products, for cross-platform
support.
lstrip_paths: (optional)
If a prefix path is passed, the prefix is left stripped from
the path of every artifact that contains the prefix.
<Exceptions>
securesystemslib.FormatError if a signing_key is passed and does not match
securesystemslib.formats.KEY_SCHEMA or a gpg_keyid is passed and does
not match securesystemslib.formats.KEYID_SCHEMA or exclude_patterns
are passed and don't match securesystemslib.formats.NAMES_SCHEMA, or
base_path is passed and does not match
securesystemslib.formats.PATH_SCHEMA or is not a directory.
securesystemslib.gpg.exceptions.CommandError:
If gpg is used for signing and the command exits with a non-zero code.
<Side Effects>
If a key parameter is passed for signing, the newly created link metadata
file is written to disk using the filename scheme: `link.FILENAME_FORMAT`
<Returns>
Newly created Metablock object containing a Link object
"""
LOG.info("Running '{}'...".format(name))
# Check key formats to fail early
if signing_key:
_check_match_signing_key(signing_key)
if gpg_keyid:
securesystemslib.formats.KEYID_SCHEMA.check_match(gpg_keyid)
if exclude_patterns:
securesystemslib.formats.NAMES_SCHEMA.check_match(exclude_patterns)
if base_path:
securesystemslib.formats.PATH_SCHEMA.check_match(base_path)
if material_list:
LOG.info("Recording materials '{}'...".format(", ".join(material_list)))
materials_dict = record_artifacts_as_dict(
material_list,
exclude_patterns=exclude_patterns,
base_path=base_path,
follow_symlink_dirs=True,
normalize_line_endings=normalize_line_endings,
lstrip_paths=lstrip_paths,
)
if link_cmd_args:
LOG.info("Running command '{}'...".format(" ".join(link_cmd_args)))
byproducts = execute_link(link_cmd_args, record_streams)
else:
byproducts = {}
if product_list:
securesystemslib.formats.PATHS_SCHEMA.check_match(product_list)
LOG.info("Recording products '{}'...".format(", ".join(product_list)))
products_dict = record_artifacts_as_dict(
product_list,
exclude_patterns=exclude_patterns,
base_path=base_path,
follow_symlink_dirs=True,
normalize_line_endings=normalize_line_endings,
lstrip_paths=lstrip_paths,
)
LOG.info("Creating link metadata...")
environment = {}
if record_environment:
environment["workdir"] = os.getcwd().replace("\\", "/")
link = in_toto.models.link.Link(
name=name,
materials=materials_dict,
products=products_dict,
command=link_cmd_args,
byproducts=byproducts,
environment=environment,
)
link_metadata = Metablock(signed=link, compact_json=compact_json)
signature = None
if signing_key:
LOG.info("Signing link metadata using passed key...")
signature = link_metadata.sign(signing_key)
elif gpg_keyid:
LOG.info("Signing link metadata using passed GPG keyid...")
signature = link_metadata.sign_gpg(gpg_keyid, gpg_home=gpg_home)
elif gpg_use_default:
LOG.info("Signing link metadata using default GPG key ...")
signature = link_metadata.sign_gpg(gpg_keyid=None, gpg_home=gpg_home)
# We need the signature's keyid to write the link to keyid infix'ed filename
if signature:
signing_keyid = signature["keyid"]
filename = FILENAME_FORMAT.format(step_name=name, keyid=signing_keyid)
LOG.info("Storing link metadata to '{}'...".format(filename))
link_metadata.dump(filename)
return link_metadata
|
def in_toto_run(
name,
material_list,
product_list,
link_cmd_args,
record_streams=False,
signing_key=None,
gpg_keyid=None,
gpg_use_default=False,
gpg_home=None,
exclude_patterns=None,
base_path=None,
compact_json=False,
record_environment=False,
normalize_line_endings=False,
lstrip_paths=None,
):
"""
<Purpose>
Calls functions in this module to run the command passed as link_cmd_args
argument and to store materials, products, by-products and environment
information into a link metadata file.
The link metadata file is signed either with the passed signing_key, or
a gpg key identified by the passed gpg_keyid or with the default gpg
key if gpg_use_default is True.
Even if multiple key parameters are passed, only one key is used for
signing (in above order of precedence).
The link file is dumped to `link.FILENAME_FORMAT` using the signing key's
keyid.
If no key parameter is passed the link is neither signed nor dumped.
<Arguments>
name:
A unique name to relate link metadata with a step or inspection
defined in the layout.
material_list:
List of file or directory paths that should be recorded as
materials.
product_list:
List of file or directory paths that should be recorded as
products.
link_cmd_args:
A list where the first element is a command and the remaining
elements are arguments passed to that command.
record_streams: (optional)
A bool that specifies whether to redirect standard output and
and standard error to a temporary file which is returned to the
caller (True) or not (False).
signing_key: (optional)
If not None, link metadata is signed with this key.
Format is securesystemslib.formats.KEY_SCHEMA
gpg_keyid: (optional)
If not None, link metadata is signed with a gpg key identified
by the passed keyid.
gpg_use_default: (optional)
If True, link metadata is signed with default gpg key.
gpg_home: (optional)
Path to GPG keyring (if not set the default keyring is used).
exclude_patterns: (optional)
Artifacts matched by the pattern are excluded from the materials
and products sections in the resulting link.
base_path: (optional)
If passed, record artifacts relative to base_path. Default is
current working directory.
NOTE: The base_path part of the recorded material is not included
in the resulting preliminary link's material/product sections.
compact_json: (optional)
Whether or not to use the most compact json representation.
record_environment: (optional)
if values such as workdir should be recorded on the environment
dictionary (false by default)
normalize_line_endings: (optional)
If True, replaces windows and mac line endings with unix line
endings before hashing materials and products, for cross-platform
support.
lstrip_paths: (optional)
If a prefix path is passed, the prefix is left stripped from
the path of every artifact that contains the prefix.
<Exceptions>
securesystemslib.FormatError if a signing_key is passed and does not match
securesystemslib.formats.KEY_SCHEMA or a gpg_keyid is passed and does
not match securesystemslib.formats.KEYID_SCHEMA or exclude_patterns
are passed and don't match securesystemslib.formats.NAMES_SCHEMA, or
base_path is passed and does not match
securesystemslib.formats.PATH_SCHEMA or is not a directory.
in_toto.gpg.exceptions.CommandError:
If gpg is used for signing and the command exits with a non-zero code.
<Side Effects>
If a key parameter is passed for signing, the newly created link metadata
file is written to disk using the filename scheme: `link.FILENAME_FORMAT`
<Returns>
Newly created Metablock object containing a Link object
"""
LOG.info("Running '{}'...".format(name))
# Check key formats to fail early
if signing_key:
_check_match_signing_key(signing_key)
if gpg_keyid:
securesystemslib.formats.KEYID_SCHEMA.check_match(gpg_keyid)
if exclude_patterns:
securesystemslib.formats.NAMES_SCHEMA.check_match(exclude_patterns)
if base_path:
securesystemslib.formats.PATH_SCHEMA.check_match(base_path)
if material_list:
LOG.info("Recording materials '{}'...".format(", ".join(material_list)))
materials_dict = record_artifacts_as_dict(
material_list,
exclude_patterns=exclude_patterns,
base_path=base_path,
follow_symlink_dirs=True,
normalize_line_endings=normalize_line_endings,
lstrip_paths=lstrip_paths,
)
if link_cmd_args:
LOG.info("Running command '{}'...".format(" ".join(link_cmd_args)))
byproducts = execute_link(link_cmd_args, record_streams)
else:
byproducts = {}
if product_list:
securesystemslib.formats.PATHS_SCHEMA.check_match(product_list)
LOG.info("Recording products '{}'...".format(", ".join(product_list)))
products_dict = record_artifacts_as_dict(
product_list,
exclude_patterns=exclude_patterns,
base_path=base_path,
follow_symlink_dirs=True,
normalize_line_endings=normalize_line_endings,
lstrip_paths=lstrip_paths,
)
LOG.info("Creating link metadata...")
environment = {}
if record_environment:
environment["workdir"] = os.getcwd().replace("\\", "/")
link = in_toto.models.link.Link(
name=name,
materials=materials_dict,
products=products_dict,
command=link_cmd_args,
byproducts=byproducts,
environment=environment,
)
link_metadata = Metablock(signed=link, compact_json=compact_json)
signature = None
if signing_key:
LOG.info("Signing link metadata using passed key...")
signature = link_metadata.sign(signing_key)
elif gpg_keyid:
LOG.info("Signing link metadata using passed GPG keyid...")
signature = link_metadata.sign_gpg(gpg_keyid, gpg_home=gpg_home)
elif gpg_use_default:
LOG.info("Signing link metadata using default GPG key ...")
signature = link_metadata.sign_gpg(gpg_keyid=None, gpg_home=gpg_home)
# We need the signature's keyid to write the link to keyid infix'ed filename
if signature:
signing_keyid = signature["keyid"]
filename = FILENAME_FORMAT.format(step_name=name, keyid=signing_keyid)
LOG.info("Storing link metadata to '{}'...".format(filename))
link_metadata.dump(filename)
return link_metadata
|
https://github.com/in-toto/in-toto/issues/282
|
======================================================================
FAIL: test_in_toto_run_with_byproduct (tests.test_runlib.TestInTotoRun)
Successfully run, verify recorded byproduct.
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/lukpueh/in-toto/tests/test_runlib.py", line 495, in test_in_toto_run_with_byproduct
"Python" in link.signed.byproducts.get("stderr"))
AssertionError: False is not true
Stderr:
in_toto.runlib:501:INFO:Running 'test_step'...
in_toto.runlib:524:INFO:Running command 'python --version'...
in_toto.runlib:538:INFO:Creating link metadata...
|
AssertionError
|
def in_toto_record_start(
step_name,
material_list,
signing_key=None,
gpg_keyid=None,
gpg_use_default=False,
gpg_home=None,
exclude_patterns=None,
base_path=None,
record_environment=False,
normalize_line_endings=False,
lstrip_paths=None,
):
"""
<Purpose>
Starts creating link metadata for a multi-part in-toto step. I.e.
records passed materials, creates link meta data object from it, signs it
with passed signing_key, gpg key identified by the passed gpg_keyid
or the default gpg key and stores it to disk under
UNFINISHED_FILENAME_FORMAT.
One of signing_key, gpg_keyid or gpg_use_default has to be passed.
<Arguments>
step_name:
A unique name to relate link metadata with a step defined in the
layout.
material_list:
List of file or directory paths that should be recorded as
materials.
signing_key: (optional)
If not None, link metadata is signed with this key.
Format is securesystemslib.formats.KEY_SCHEMA
gpg_keyid: (optional)
If not None, link metadata is signed with a gpg key identified
by the passed keyid.
gpg_use_default: (optional)
If True, link metadata is signed with default gpg key.
gpg_home: (optional)
Path to GPG keyring (if not set the default keyring is used).
exclude_patterns: (optional)
Artifacts matched by the pattern are excluded from the materials
section in the resulting preliminary link.
base_path: (optional)
If passed, record materials relative to base_path. Default is
current working directory.
NOTE: The base_path part of the recorded materials is not included
in the resulting preliminary link's material section.
record_environment: (optional)
if values such as workdir should be recorded on the environment
dictionary (false by default)
normalize_line_endings: (optional)
If True, replaces windows and mac line endings with unix line
endings before hashing materials, for cross-platform support.
lstrip_paths: (optional)
If a prefix path is passed, the prefix is left stripped from
the path of every artifact that contains the prefix.
<Exceptions>
ValueError if none of signing_key, gpg_keyid or gpg_use_default=True
is passed.
securesystemslib.FormatError if a signing_key is passed and does not match
securesystemslib.formats.KEY_SCHEMA or a gpg_keyid is passed and does
not match securesystemslib.formats.KEYID_SCHEMA or exclude_patterns
are passed and don't match securesystemslib.formats.NAMES_SCHEMA, or
base_path is passed and does not match
securesystemslib.formats.PATH_SCHEMA or is not a directory.
securesystemslib.gpg.exceptions.CommandError:
If gpg is used for signing and the command exits with a non-zero code.
<Side Effects>
Writes newly created link metadata file to disk using the filename scheme
from link.UNFINISHED_FILENAME_FORMAT
<Returns>
None.
"""
LOG.info("Start recording '{}'...".format(step_name))
# Fail if there is no signing key arg at all
if not signing_key and not gpg_keyid and not gpg_use_default:
raise ValueError(
"Pass either a signing key, a gpg keyid or set gpg_use_default to True!"
)
# Check key formats to fail early
if signing_key:
_check_match_signing_key(signing_key)
if gpg_keyid:
securesystemslib.formats.KEYID_SCHEMA.check_match(gpg_keyid)
if exclude_patterns:
securesystemslib.formats.NAMES_SCHEMA.check_match(exclude_patterns)
if base_path:
securesystemslib.formats.PATH_SCHEMA.check_match(base_path)
if material_list:
LOG.info("Recording materials '{}'...".format(", ".join(material_list)))
materials_dict = record_artifacts_as_dict(
material_list,
exclude_patterns=exclude_patterns,
base_path=base_path,
follow_symlink_dirs=True,
normalize_line_endings=normalize_line_endings,
lstrip_paths=lstrip_paths,
)
LOG.info("Creating preliminary link metadata...")
environment = {}
if record_environment:
environment["workdir"] = os.getcwd().replace("\\", "/")
link = in_toto.models.link.Link(
name=step_name,
materials=materials_dict,
products={},
command=[],
byproducts={},
environment=environment,
)
link_metadata = Metablock(signed=link)
if signing_key:
LOG.info("Signing link metadata using passed key...")
signature = link_metadata.sign(signing_key)
elif gpg_keyid:
LOG.info("Signing link metadata using passed GPG keyid...")
signature = link_metadata.sign_gpg(gpg_keyid, gpg_home=gpg_home)
else: # (gpg_use_default)
LOG.info("Signing link metadata using default GPG key ...")
signature = link_metadata.sign_gpg(gpg_keyid=None, gpg_home=gpg_home)
# We need the signature's keyid to write the link to keyid infix'ed filename
signing_keyid = signature["keyid"]
unfinished_fn = UNFINISHED_FILENAME_FORMAT.format(
step_name=step_name, keyid=signing_keyid
)
LOG.info("Storing preliminary link metadata to '{}'...".format(unfinished_fn))
link_metadata.dump(unfinished_fn)
|
def in_toto_record_start(
step_name,
material_list,
signing_key=None,
gpg_keyid=None,
gpg_use_default=False,
gpg_home=None,
exclude_patterns=None,
base_path=None,
record_environment=False,
normalize_line_endings=False,
lstrip_paths=None,
):
"""
<Purpose>
Starts creating link metadata for a multi-part in-toto step. I.e.
records passed materials, creates link meta data object from it, signs it
with passed signing_key, gpg key identified by the passed gpg_keyid
or the default gpg key and stores it to disk under
UNFINISHED_FILENAME_FORMAT.
One of signing_key, gpg_keyid or gpg_use_default has to be passed.
<Arguments>
step_name:
A unique name to relate link metadata with a step defined in the
layout.
material_list:
List of file or directory paths that should be recorded as
materials.
signing_key: (optional)
If not None, link metadata is signed with this key.
Format is securesystemslib.formats.KEY_SCHEMA
gpg_keyid: (optional)
If not None, link metadata is signed with a gpg key identified
by the passed keyid.
gpg_use_default: (optional)
If True, link metadata is signed with default gpg key.
gpg_home: (optional)
Path to GPG keyring (if not set the default keyring is used).
exclude_patterns: (optional)
Artifacts matched by the pattern are excluded from the materials
section in the resulting preliminary link.
base_path: (optional)
If passed, record materials relative to base_path. Default is
current working directory.
NOTE: The base_path part of the recorded materials is not included
in the resulting preliminary link's material section.
record_environment: (optional)
if values such as workdir should be recorded on the environment
dictionary (false by default)
normalize_line_endings: (optional)
If True, replaces windows and mac line endings with unix line
endings before hashing materials, for cross-platform support.
lstrip_paths: (optional)
If a prefix path is passed, the prefix is left stripped from
the path of every artifact that contains the prefix.
<Exceptions>
ValueError if none of signing_key, gpg_keyid or gpg_use_default=True
is passed.
securesystemslib.FormatError if a signing_key is passed and does not match
securesystemslib.formats.KEY_SCHEMA or a gpg_keyid is passed and does
not match securesystemslib.formats.KEYID_SCHEMA or exclude_patterns
are passed and don't match securesystemslib.formats.NAMES_SCHEMA, or
base_path is passed and does not match
securesystemslib.formats.PATH_SCHEMA or is not a directory.
in_toto.gpg.exceptions.CommandError:
If gpg is used for signing and the command exits with a non-zero code.
<Side Effects>
Writes newly created link metadata file to disk using the filename scheme
from link.UNFINISHED_FILENAME_FORMAT
<Returns>
None.
"""
LOG.info("Start recording '{}'...".format(step_name))
# Fail if there is no signing key arg at all
if not signing_key and not gpg_keyid and not gpg_use_default:
raise ValueError(
"Pass either a signing key, a gpg keyid or set gpg_use_default to True!"
)
# Check key formats to fail early
if signing_key:
_check_match_signing_key(signing_key)
if gpg_keyid:
securesystemslib.formats.KEYID_SCHEMA.check_match(gpg_keyid)
if exclude_patterns:
securesystemslib.formats.NAMES_SCHEMA.check_match(exclude_patterns)
if base_path:
securesystemslib.formats.PATH_SCHEMA.check_match(base_path)
if material_list:
LOG.info("Recording materials '{}'...".format(", ".join(material_list)))
materials_dict = record_artifacts_as_dict(
material_list,
exclude_patterns=exclude_patterns,
base_path=base_path,
follow_symlink_dirs=True,
normalize_line_endings=normalize_line_endings,
lstrip_paths=lstrip_paths,
)
LOG.info("Creating preliminary link metadata...")
environment = {}
if record_environment:
environment["workdir"] = os.getcwd().replace("\\", "/")
link = in_toto.models.link.Link(
name=step_name,
materials=materials_dict,
products={},
command=[],
byproducts={},
environment=environment,
)
link_metadata = Metablock(signed=link)
if signing_key:
LOG.info("Signing link metadata using passed key...")
signature = link_metadata.sign(signing_key)
elif gpg_keyid:
LOG.info("Signing link metadata using passed GPG keyid...")
signature = link_metadata.sign_gpg(gpg_keyid, gpg_home=gpg_home)
else: # (gpg_use_default)
LOG.info("Signing link metadata using default GPG key ...")
signature = link_metadata.sign_gpg(gpg_keyid=None, gpg_home=gpg_home)
# We need the signature's keyid to write the link to keyid infix'ed filename
signing_keyid = signature["keyid"]
unfinished_fn = UNFINISHED_FILENAME_FORMAT.format(
step_name=step_name, keyid=signing_keyid
)
LOG.info("Storing preliminary link metadata to '{}'...".format(unfinished_fn))
link_metadata.dump(unfinished_fn)
|
https://github.com/in-toto/in-toto/issues/282
|
======================================================================
FAIL: test_in_toto_run_with_byproduct (tests.test_runlib.TestInTotoRun)
Successfully run, verify recorded byproduct.
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/lukpueh/in-toto/tests/test_runlib.py", line 495, in test_in_toto_run_with_byproduct
"Python" in link.signed.byproducts.get("stderr"))
AssertionError: False is not true
Stderr:
in_toto.runlib:501:INFO:Running 'test_step'...
in_toto.runlib:524:INFO:Running command 'python --version'...
in_toto.runlib:538:INFO:Creating link metadata...
|
AssertionError
|
def in_toto_record_stop(
step_name,
product_list,
signing_key=None,
gpg_keyid=None,
gpg_use_default=False,
gpg_home=None,
exclude_patterns=None,
base_path=None,
normalize_line_endings=False,
lstrip_paths=None,
):
"""
<Purpose>
Finishes creating link metadata for a multi-part in-toto step.
Loads unfinished link metadata file from disk, verifies
that the file was signed with either the passed signing key, a gpg key
identified by the passed gpg_keyid or the default gpg key.
Then records products, updates unfinished Link object
(products and signature), removes unfinished link file from and
stores new link file to disk.
One of signing_key, gpg_keyid or gpg_use_default has to be passed and it
needs to be the same that was used with preceding in_toto_record_start.
<Arguments>
step_name:
A unique name to relate link metadata with a step defined in the
layout.
product_list:
List of file or directory paths that should be recorded as
products.
signing_key: (optional)
If not None, link metadata is signed with this key.
Format is securesystemslib.formats.KEY_SCHEMA
gpg_keyid: (optional)
If not None, link metadata is signed with a gpg key identified
by the passed keyid.
gpg_use_default: (optional)
If True, link metadata is signed with default gpg key.
gpg_home: (optional)
Path to GPG keyring (if not set the default keyring is used).
exclude_patterns: (optional)
Artifacts matched by the pattern are excluded from the products
sections in the resulting link.
base_path: (optional)
If passed, record products relative to base_path. Default is
current working directory.
NOTE: The base_path part of the recorded products is not included
in the resulting preliminary link's product section.
normalize_line_endings: (optional)
If True, replaces windows and mac line endings with unix line
endings before hashing products, for cross-platform support.
lstrip_paths: (optional)
If a prefix path is passed, the prefix is left stripped from
the path of every artifact that contains the prefix.
<Exceptions>
ValueError if none of signing_key, gpg_keyid or gpg_use_default=True
is passed.
securesystemslib.FormatError if a signing_key is passed and does not match
securesystemslib.formats.KEY_SCHEMA or a gpg_keyid is passed and does
not match securesystemslib.formats.KEYID_SCHEMA, or exclude_patterns
are passed and don't match securesystemslib.formats.NAMES_SCHEMA, or
base_path is passed and does not match
securesystemslib.formats.PATH_SCHEMA or is not a directory.
LinkNotFoundError if gpg is used for signing and the corresponding
preliminary link file can not be found in the current working directory
SignatureVerificationError:
If the signature of the preliminary link file is invalid.
securesystemslib.gpg.exceptions.KeyExpirationError:
If the key used to verify the signature of the preliminary link file is
an expired gpg key.
securesystemslib.gpg.exceptions.CommandError:
If gpg is used for signing and the command exits with a non-zero code.
<Side Effects>
Writes newly created link metadata file to disk using the filename scheme
from link.FILENAME_FORMAT
Removes unfinished link file link.UNFINISHED_FILENAME_FORMAT from disk
<Returns>
None.
"""
LOG.info("Stop recording '{}'...".format(step_name))
# Check that we have something to sign and if the formats are right
if not signing_key and not gpg_keyid and not gpg_use_default:
raise ValueError(
"Pass either a signing key, a gpg keyid or set gpg_use_default to True"
)
if signing_key:
_check_match_signing_key(signing_key)
if gpg_keyid:
securesystemslib.formats.KEYID_SCHEMA.check_match(gpg_keyid)
if exclude_patterns:
securesystemslib.formats.NAMES_SCHEMA.check_match(exclude_patterns)
if base_path:
securesystemslib.formats.PATH_SCHEMA.check_match(base_path)
# Load preliminary link file
# If we have a signing key we can use the keyid to construct the name
if signing_key:
unfinished_fn = UNFINISHED_FILENAME_FORMAT.format(
step_name=step_name, keyid=signing_key["keyid"]
)
# FIXME: Currently there is no way to know the default GPG key's keyid and
# so we glob for preliminary link files
else:
unfinished_fn_glob = UNFINISHED_FILENAME_FORMAT_GLOB.format(
step_name=step_name, pattern="*"
)
unfinished_fn_list = glob.glob(unfinished_fn_glob)
if not len(unfinished_fn_list):
raise in_toto.exceptions.LinkNotFoundError(
"Could not find a preliminary"
" link for step '{}' in the current working directory.".format(
step_name
)
)
if len(unfinished_fn_list) > 1:
raise in_toto.exceptions.LinkNotFoundError(
"Found more than one"
" preliminary links for step '{}' in the current working directory:"
" {}. We need exactly one to stop recording.".format(
step_name, ", ".join(unfinished_fn_list)
)
)
unfinished_fn = unfinished_fn_list[0]
LOG.info("Loading preliminary link metadata '{}'...".format(unfinished_fn))
link_metadata = Metablock.load(unfinished_fn)
# The file must have been signed by the same key
# If we have a signing_key we use it for verification as well
if signing_key:
LOG.info("Verifying preliminary link signature using passed signing key...")
keyid = signing_key["keyid"]
verification_key = signing_key
elif gpg_keyid:
LOG.info("Verifying preliminary link signature using passed gpg key...")
gpg_pubkey = securesystemslib.gpg.functions.export_pubkey(gpg_keyid, gpg_home)
keyid = gpg_pubkey["keyid"]
verification_key = gpg_pubkey
else: # must be gpg_use_default
# FIXME: Currently there is no way to know the default GPG key's keyid
# before signing. As a workaround we extract the keyid of the preliminary
# Link file's signature and try to export a pubkey from the gpg
# keyring. We do this even if a gpg_keyid was specified, because gpg
# accepts many different ids (mail, name, parts of an id, ...) but we
# need a specific format.
LOG.info("Verifying preliminary link signature using default gpg key...")
keyid = link_metadata.signatures[0]["keyid"]
gpg_pubkey = securesystemslib.gpg.functions.export_pubkey(keyid, gpg_home)
verification_key = gpg_pubkey
link_metadata.verify_signature(verification_key)
# Record products if a product path list was passed
if product_list:
LOG.info("Recording products '{}'...".format(", ".join(product_list)))
link_metadata.signed.products = record_artifacts_as_dict(
product_list,
exclude_patterns=exclude_patterns,
base_path=base_path,
follow_symlink_dirs=True,
normalize_line_endings=normalize_line_endings,
lstrip_paths=lstrip_paths,
)
link_metadata.signatures = []
if signing_key:
LOG.info("Updating signature with key '{:.8}...'...".format(keyid))
link_metadata.sign(signing_key)
else: # gpg_keyid or gpg_use_default
# In both cases we use the keyid we got from verifying the preliminary
# link signature above.
LOG.info("Updating signature with gpg key '{:.8}...'...".format(keyid))
link_metadata.sign_gpg(keyid, gpg_home)
fn = FILENAME_FORMAT.format(step_name=step_name, keyid=keyid)
LOG.info("Storing link metadata to '{}'...".format(fn))
link_metadata.dump(fn)
LOG.info("Removing unfinished link metadata '{}'...".format(unfinished_fn))
os.remove(unfinished_fn)
|
def in_toto_record_stop(
step_name,
product_list,
signing_key=None,
gpg_keyid=None,
gpg_use_default=False,
gpg_home=None,
exclude_patterns=None,
base_path=None,
normalize_line_endings=False,
lstrip_paths=None,
):
"""
<Purpose>
Finishes creating link metadata for a multi-part in-toto step.
Loads unfinished link metadata file from disk, verifies
that the file was signed with either the passed signing key, a gpg key
identified by the passed gpg_keyid or the default gpg key.
Then records products, updates unfinished Link object
(products and signature), removes unfinished link file from and
stores new link file to disk.
One of signing_key, gpg_keyid or gpg_use_default has to be passed and it
needs to be the same that was used with preceding in_toto_record_start.
<Arguments>
step_name:
A unique name to relate link metadata with a step defined in the
layout.
product_list:
List of file or directory paths that should be recorded as
products.
signing_key: (optional)
If not None, link metadata is signed with this key.
Format is securesystemslib.formats.KEY_SCHEMA
gpg_keyid: (optional)
If not None, link metadata is signed with a gpg key identified
by the passed keyid.
gpg_use_default: (optional)
If True, link metadata is signed with default gpg key.
gpg_home: (optional)
Path to GPG keyring (if not set the default keyring is used).
exclude_patterns: (optional)
Artifacts matched by the pattern are excluded from the products
sections in the resulting link.
base_path: (optional)
If passed, record products relative to base_path. Default is
current working directory.
NOTE: The base_path part of the recorded products is not included
in the resulting preliminary link's product section.
normalize_line_endings: (optional)
If True, replaces windows and mac line endings with unix line
endings before hashing products, for cross-platform support.
lstrip_paths: (optional)
If a prefix path is passed, the prefix is left stripped from
the path of every artifact that contains the prefix.
<Exceptions>
ValueError if none of signing_key, gpg_keyid or gpg_use_default=True
is passed.
securesystemslib.FormatError if a signing_key is passed and does not match
securesystemslib.formats.KEY_SCHEMA or a gpg_keyid is passed and does
not match securesystemslib.formats.KEYID_SCHEMA, or exclude_patterns
are passed and don't match securesystemslib.formats.NAMES_SCHEMA, or
base_path is passed and does not match
securesystemslib.formats.PATH_SCHEMA or is not a directory.
LinkNotFoundError if gpg is used for signing and the corresponding
preliminary link file can not be found in the current working directory
SignatureVerificationError:
If the signature of the preliminary link file is invalid.
in_toto.gpg.exceptions.KeyExpirationError:
If the key used to verify the signature of the preliminary link file is
an expired gpg key.
in_toto.gpg.exceptions.CommandError:
If gpg is used for signing and the command exits with a non-zero code.
<Side Effects>
Writes newly created link metadata file to disk using the filename scheme
from link.FILENAME_FORMAT
Removes unfinished link file link.UNFINISHED_FILENAME_FORMAT from disk
<Returns>
None.
"""
LOG.info("Stop recording '{}'...".format(step_name))
# Check that we have something to sign and if the formats are right
if not signing_key and not gpg_keyid and not gpg_use_default:
raise ValueError(
"Pass either a signing key, a gpg keyid or set gpg_use_default to True"
)
if signing_key:
_check_match_signing_key(signing_key)
if gpg_keyid:
securesystemslib.formats.KEYID_SCHEMA.check_match(gpg_keyid)
if exclude_patterns:
securesystemslib.formats.NAMES_SCHEMA.check_match(exclude_patterns)
if base_path:
securesystemslib.formats.PATH_SCHEMA.check_match(base_path)
# Load preliminary link file
# If we have a signing key we can use the keyid to construct the name
if signing_key:
unfinished_fn = UNFINISHED_FILENAME_FORMAT.format(
step_name=step_name, keyid=signing_key["keyid"]
)
# FIXME: Currently there is no way to know the default GPG key's keyid and
# so we glob for preliminary link files
else:
unfinished_fn_glob = UNFINISHED_FILENAME_FORMAT_GLOB.format(
step_name=step_name, pattern="*"
)
unfinished_fn_list = glob.glob(unfinished_fn_glob)
if not len(unfinished_fn_list):
raise in_toto.exceptions.LinkNotFoundError(
"Could not find a preliminary"
" link for step '{}' in the current working directory.".format(
step_name
)
)
if len(unfinished_fn_list) > 1:
raise in_toto.exceptions.LinkNotFoundError(
"Found more than one"
" preliminary links for step '{}' in the current working directory:"
" {}. We need exactly one to stop recording.".format(
step_name, ", ".join(unfinished_fn_list)
)
)
unfinished_fn = unfinished_fn_list[0]
LOG.info("Loading preliminary link metadata '{}'...".format(unfinished_fn))
link_metadata = Metablock.load(unfinished_fn)
# The file must have been signed by the same key
# If we have a signing_key we use it for verification as well
if signing_key:
LOG.info("Verifying preliminary link signature using passed signing key...")
keyid = signing_key["keyid"]
verification_key = signing_key
elif gpg_keyid:
LOG.info("Verifying preliminary link signature using passed gpg key...")
gpg_pubkey = in_toto.gpg.functions.gpg_export_pubkey(gpg_keyid, gpg_home)
keyid = gpg_pubkey["keyid"]
verification_key = gpg_pubkey
else: # must be gpg_use_default
# FIXME: Currently there is no way to know the default GPG key's keyid
# before signing. As a workaround we extract the keyid of the preliminary
# Link file's signature and try to export a pubkey from the gpg
# keyring. We do this even if a gpg_keyid was specified, because gpg
# accepts many different ids (mail, name, parts of an id, ...) but we
# need a specific format.
LOG.info("Verifying preliminary link signature using default gpg key...")
keyid = link_metadata.signatures[0]["keyid"]
gpg_pubkey = in_toto.gpg.functions.gpg_export_pubkey(keyid, gpg_home)
verification_key = gpg_pubkey
link_metadata.verify_signature(verification_key)
# Record products if a product path list was passed
if product_list:
LOG.info("Recording products '{}'...".format(", ".join(product_list)))
link_metadata.signed.products = record_artifacts_as_dict(
product_list,
exclude_patterns=exclude_patterns,
base_path=base_path,
follow_symlink_dirs=True,
normalize_line_endings=normalize_line_endings,
lstrip_paths=lstrip_paths,
)
link_metadata.signatures = []
if signing_key:
LOG.info("Updating signature with key '{:.8}...'...".format(keyid))
link_metadata.sign(signing_key)
else: # gpg_keyid or gpg_use_default
# In both cases we use the keyid we got from verifying the preliminary
# link signature above.
LOG.info("Updating signature with gpg key '{:.8}...'...".format(keyid))
link_metadata.sign_gpg(keyid, gpg_home)
fn = FILENAME_FORMAT.format(step_name=step_name, keyid=keyid)
LOG.info("Storing link metadata to '{}'...".format(fn))
link_metadata.dump(fn)
LOG.info("Removing unfinished link metadata '{}'...".format(unfinished_fn))
os.remove(unfinished_fn)
|
https://github.com/in-toto/in-toto/issues/282
|
======================================================================
FAIL: test_in_toto_run_with_byproduct (tests.test_runlib.TestInTotoRun)
Successfully run, verify recorded byproduct.
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/lukpueh/in-toto/tests/test_runlib.py", line 495, in test_in_toto_run_with_byproduct
"Python" in link.signed.byproducts.get("stderr"))
AssertionError: False is not true
Stderr:
in_toto.runlib:501:INFO:Running 'test_step'...
in_toto.runlib:524:INFO:Running command 'python --version'...
in_toto.runlib:538:INFO:Creating link metadata...
|
AssertionError
|
def import_gpg_public_keys_from_keyring_as_dict(keyids, gpg_home=False):
"""Creates a dictionary of gpg public keys retrieving gpg public keys
identified by the list of passed `keyids` from the gpg keyring at `gpg_home`.
If `gpg_home` is False the default keyring is used."""
key_dict = {}
for gpg_keyid in keyids:
pub_key = securesystemslib.gpg.functions.export_pubkey(
gpg_keyid, homedir=gpg_home
)
securesystemslib.formats.GPG_PUBKEY_SCHEMA.check_match(pub_key)
keyid = pub_key["keyid"]
key_dict[keyid] = pub_key
return key_dict
|
def import_gpg_public_keys_from_keyring_as_dict(keyids, gpg_home=False):
"""Creates a dictionary of gpg public keys retrieving gpg public keys
identified by the list of passed `keyids` from the gpg keyring at `gpg_home`.
If `gpg_home` is False the default keyring is used."""
key_dict = {}
for gpg_keyid in keyids:
pub_key = in_toto.gpg.functions.gpg_export_pubkey(gpg_keyid, homedir=gpg_home)
in_toto.gpg.formats.PUBKEY_SCHEMA.check_match(pub_key)
keyid = pub_key["keyid"]
key_dict[keyid] = pub_key
return key_dict
|
https://github.com/in-toto/in-toto/issues/282
|
======================================================================
FAIL: test_in_toto_run_with_byproduct (tests.test_runlib.TestInTotoRun)
Successfully run, verify recorded byproduct.
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/lukpueh/in-toto/tests/test_runlib.py", line 495, in test_in_toto_run_with_byproduct
"Python" in link.signed.byproducts.get("stderr"))
AssertionError: False is not true
Stderr:
in_toto.runlib:501:INFO:Running 'test_step'...
in_toto.runlib:524:INFO:Running command 'python --version'...
in_toto.runlib:538:INFO:Creating link metadata...
|
AssertionError
|
def verify_layout_signatures(layout_metablock, keys_dict):
"""
<Purpose>
Iteratively verifies the signatures of a Metablock object containing
a Layout object for every verification key in the passed keys dictionary.
Requires at least one key to be passed and requires every passed key to
find a valid signature.
<Arguments>
layout_metablock:
A Metablock object containing a Layout whose signatures are
verified.
keys_dict:
A dictionary of keys to verify the signatures conformant with
securesystemslib.formats.VERIFICATION_KEY_DICT_SCHEMA.
<Exceptions>
securesystemslib.exceptions.FormatError
if the passed key dict does not match VERIFICATION_KEY_DICT_SCHEMA.
SignatureVerificationError
if an empty verification key dictionary was passed, or
if any of the passed verification keys fails to verify a signature.
securesystemslib.gpg.exceptions.KeyExpirationError:
if any of the passed verification keys is an expired gpg key
"""
securesystemslib.formats.VERIFICATION_KEY_DICT_SCHEMA.check_match(keys_dict)
# Fail if an empty verification key dictionary was passed
if len(keys_dict) < 1:
raise SignatureVerificationError(
"Layout signature verification requires at least one key."
)
# Fail if any of the passed keys can't verify a signature on the Layout
for junk, verify_key in six.iteritems(keys_dict):
layout_metablock.verify_signature(verify_key)
|
def verify_layout_signatures(layout_metablock, keys_dict):
"""
<Purpose>
Iteratively verifies the signatures of a Metablock object containing
a Layout object for every verification key in the passed keys dictionary.
Requires at least one key to be passed and requires every passed key to
find a valid signature.
<Arguments>
layout_metablock:
A Metablock object containing a Layout whose signatures are
verified.
keys_dict:
A dictionary of keys to verify the signatures conformant with
securesystemslib.formats.ANY_VERIFICATION_KEY_DICT_SCHEMA.
<Exceptions>
securesystemslib.exceptions.FormatError
if the passed key dict does not match ANY_VERIFICATION_KEY_DICT_SCHEMA.
SignatureVerificationError
if an empty verification key dictionary was passed, or
if any of the passed verification keys fails to verify a signature.
in_toto.gpg.exceptions.KeyExpirationError:
if any of the passed verification keys is an expired gpg key
"""
in_toto.formats.ANY_VERIFICATION_KEY_DICT_SCHEMA.check_match(keys_dict)
# Fail if an empty verification key dictionary was passed
if len(keys_dict) < 1:
raise SignatureVerificationError(
"Layout signature verification requires at least one key."
)
# Fail if any of the passed keys can't verify a signature on the Layout
for junk, verify_key in six.iteritems(keys_dict):
layout_metablock.verify_signature(verify_key)
|
https://github.com/in-toto/in-toto/issues/282
|
======================================================================
FAIL: test_in_toto_run_with_byproduct (tests.test_runlib.TestInTotoRun)
Successfully run, verify recorded byproduct.
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/lukpueh/in-toto/tests/test_runlib.py", line 495, in test_in_toto_run_with_byproduct
"Python" in link.signed.byproducts.get("stderr"))
AssertionError: False is not true
Stderr:
in_toto.runlib:501:INFO:Running 'test_step'...
in_toto.runlib:524:INFO:Running command 'python --version'...
in_toto.runlib:538:INFO:Creating link metadata...
|
AssertionError
|
def request(self, method, url, **kwargs):
if "params" not in kwargs:
kwargs["params"] = {"access_token": self.token}
r = requests.request(method=method, url=url, **kwargs)
r.raise_for_status()
json = r.json()
if check_error(json):
return json
|
def request(self, method, url, **kwargs):
kwargs.setdefault(
"params",
{
"access_token": self.token,
},
)
r = requests.request(method=method, url=url, **kwargs)
r.raise_for_status()
json = r.json()
if check_error(json):
return json
|
https://github.com/offu/WeRoBot/issues/37
|
In [1]: import six
In [2]: def to_text(value, encoding="utf-8"):
...: if isinstance(value, (six.string_types, six.binary_type)):
...: return value.decode(encoding)
...: if isinstance(value, int):
...: return six.text_type(value)
...: assert isinstance(value, six.text_type)
...: return value
...:
In [3]: print to_text(u'\u4e2d\u5348')
---------------------------------------------------------------------------
UnicodeEncodeError Traceback (most recent call last)
<ipython-input-3-9478a71004a0> in <module>()
----> 1 print to_text(u'\u4e2d\u5348')
<ipython-input-2-3b3748f845b1> in to_text(value, encoding)
1 def to_text(value, encoding="utf-8"):
2 if isinstance(value, (six.string_types, six.binary_type)):
----> 3 return value.decode(encoding)
4 if isinstance(value, int):
5 return six.text_type(value)
/root/.virtualenvs/wxPortal/lib/python2.7/encodings/utf_8.pyc in decode(input, errors)
14
15 def decode(input, errors='strict'):
---> 16 return codecs.utf_8_decode(input, errors, True)
17
18 class IncrementalEncoder(codecs.IncrementalEncoder):
UnicodeEncodeError: 'ascii' codec can't encode characters in position 0-1: ordinal not in range(128)
|
UnicodeEncodeError
|
def grant_token(self):
"""
获取 Access Token 。
详情请参考 http://mp.weixin.qq.com/wiki/index.php?title=通用接口文档
:return: 返回的 JSON 数据包
"""
return self.get(
url="https://api.weixin.qq.com/cgi-bin/token",
params={
"grant_type": "client_credential",
"appid": self.appid,
"secret": self.appsecret,
},
)
|
def grant_token(self):
"""
获取 Access Token 。
详情请参考 http://mp.weixin.qq.com/wiki/index.php?title=通用接口文档
:param appid: 第三方用户唯一凭证
:param appsecret: 第三方用户唯一凭证密钥,即 App Secret
:return: 返回的 JSON 数据包
"""
return self.get(
url="https://api.weixin.qq.com/cgi-bin/token",
params={
"grant_type": "client_credential",
"appid": self.appid,
"secret": self.appsecret,
},
)
|
https://github.com/offu/WeRoBot/issues/37
|
In [1]: import six
In [2]: def to_text(value, encoding="utf-8"):
...: if isinstance(value, (six.string_types, six.binary_type)):
...: return value.decode(encoding)
...: if isinstance(value, int):
...: return six.text_type(value)
...: assert isinstance(value, six.text_type)
...: return value
...:
In [3]: print to_text(u'\u4e2d\u5348')
---------------------------------------------------------------------------
UnicodeEncodeError Traceback (most recent call last)
<ipython-input-3-9478a71004a0> in <module>()
----> 1 print to_text(u'\u4e2d\u5348')
<ipython-input-2-3b3748f845b1> in to_text(value, encoding)
1 def to_text(value, encoding="utf-8"):
2 if isinstance(value, (six.string_types, six.binary_type)):
----> 3 return value.decode(encoding)
4 if isinstance(value, int):
5 return six.text_type(value)
/root/.virtualenvs/wxPortal/lib/python2.7/encodings/utf_8.pyc in decode(input, errors)
14
15 def decode(input, errors='strict'):
---> 16 return codecs.utf_8_decode(input, errors, True)
17
18 class IncrementalEncoder(codecs.IncrementalEncoder):
UnicodeEncodeError: 'ascii' codec can't encode characters in position 0-1: ordinal not in range(128)
|
UnicodeEncodeError
|
def create_menu(self, menu_data):
"""
创建自定义菜单 ::
client = Client("id", "secret")
client.create_menu({
"button":[
{
"type":"click",
"name":"今日歌曲",
"key":"V1001_TODAY_MUSIC"
},
{
"type":"click",
"name":"歌手简介",
"key":"V1001_TODAY_SINGER"
},
{
"name":"菜单",
"sub_button":[
{
"type":"view",
"name":"搜索",
"url":"http://www.soso.com/"
},
{
"type":"view",
"name":"视频",
"url":"http://v.qq.com/"
},
{
"type":"click",
"name":"赞一下我们",
"key":"V1001_GOOD"
}
]
}
]})
详情请参考 http://mp.weixin.qq.com/wiki/index.php?title=自定义菜单创建接口
:param menu_data: Python 字典
:return: 返回的 JSON 数据包
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/menu/create", data=menu_data
)
|
def create_menu(self, menu_data):
"""
创建自定义菜单 ::
client = Client("id", "secret")
client.create_menu({
"button":[
{
"type":"click",
"name":"今日歌曲",
"key":"V1001_TODAY_MUSIC"
},
{
"type":"click",
"name":"歌手简介",
"key":"V1001_TODAY_SINGER"
},
{
"name":"菜单",
"sub_button":[
{
"type":"view",
"name":"搜索",
"url":"http://www.soso.com/"
},
{
"type":"view",
"name":"视频",
"url":"http://v.qq.com/"
},
{
"type":"click",
"name":"赞一下我们",
"key":"V1001_GOOD"
}
]
}
]})
详情请参考 http://mp.weixin.qq.com/wiki/index.php?title=自定义菜单创建接口
:param access_token: Access Token,可以使用 :func:`get_token` 获取。
:param menu_data: Python 字典
:return: 返回的 JSON 数据包
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/menu/create", data=menu_data
)
|
https://github.com/offu/WeRoBot/issues/37
|
In [1]: import six
In [2]: def to_text(value, encoding="utf-8"):
...: if isinstance(value, (six.string_types, six.binary_type)):
...: return value.decode(encoding)
...: if isinstance(value, int):
...: return six.text_type(value)
...: assert isinstance(value, six.text_type)
...: return value
...:
In [3]: print to_text(u'\u4e2d\u5348')
---------------------------------------------------------------------------
UnicodeEncodeError Traceback (most recent call last)
<ipython-input-3-9478a71004a0> in <module>()
----> 1 print to_text(u'\u4e2d\u5348')
<ipython-input-2-3b3748f845b1> in to_text(value, encoding)
1 def to_text(value, encoding="utf-8"):
2 if isinstance(value, (six.string_types, six.binary_type)):
----> 3 return value.decode(encoding)
4 if isinstance(value, int):
5 return six.text_type(value)
/root/.virtualenvs/wxPortal/lib/python2.7/encodings/utf_8.pyc in decode(input, errors)
14
15 def decode(input, errors='strict'):
---> 16 return codecs.utf_8_decode(input, errors, True)
17
18 class IncrementalEncoder(codecs.IncrementalEncoder):
UnicodeEncodeError: 'ascii' codec can't encode characters in position 0-1: ordinal not in range(128)
|
UnicodeEncodeError
|
def upload_media(self, media_type, media_file):
"""
上传多媒体文件。
详情请参考 http://mp.weixin.qq.com/wiki/index.php?title=上传下载多媒体文件
:param media_type: 媒体文件类型,分别有图片(image)、语音(voice)、视频(video)和缩略图(thumb)
:param media_file:要上传的文件,一个 File-object
:return: 返回的 JSON 数据包
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/menu/create",
params={"access_token": self.token, "type": media_type},
files={"media": media_file},
)
|
def upload_media(self, type, media):
"""
上传多媒体文件。
详情请参考 http://mp.weixin.qq.com/wiki/index.php?title=上传下载多媒体文件
:param type: 媒体文件类型,分别有图片(image)、语音(voice)、视频(video)和缩略图(thumb)
:param media:要上传的文件,一个 File-object
:return: 返回的 JSON 数据包
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/menu/create",
params={"access_token": self.token, "type": type},
files={"media": media},
)
|
https://github.com/offu/WeRoBot/issues/37
|
In [1]: import six
In [2]: def to_text(value, encoding="utf-8"):
...: if isinstance(value, (six.string_types, six.binary_type)):
...: return value.decode(encoding)
...: if isinstance(value, int):
...: return six.text_type(value)
...: assert isinstance(value, six.text_type)
...: return value
...:
In [3]: print to_text(u'\u4e2d\u5348')
---------------------------------------------------------------------------
UnicodeEncodeError Traceback (most recent call last)
<ipython-input-3-9478a71004a0> in <module>()
----> 1 print to_text(u'\u4e2d\u5348')
<ipython-input-2-3b3748f845b1> in to_text(value, encoding)
1 def to_text(value, encoding="utf-8"):
2 if isinstance(value, (six.string_types, six.binary_type)):
----> 3 return value.decode(encoding)
4 if isinstance(value, int):
5 return six.text_type(value)
/root/.virtualenvs/wxPortal/lib/python2.7/encodings/utf_8.pyc in decode(input, errors)
14
15 def decode(input, errors='strict'):
---> 16 return codecs.utf_8_decode(input, errors, True)
17
18 class IncrementalEncoder(codecs.IncrementalEncoder):
UnicodeEncodeError: 'ascii' codec can't encode characters in position 0-1: ordinal not in range(128)
|
UnicodeEncodeError
|
def create_group(self, name):
"""
创建分组
详情请参考 http://mp.weixin.qq.com/wiki/index.php?title=分组管理接口
:param name: 分组名字(30个字符以内)
:return: 返回的 JSON 数据包
"""
name = to_text(name)
return self.post(
url="https://api.weixin.qq.com/cgi-bin/groups/create",
data={"group": {"name": name}},
)
|
def create_group(self, name):
"""
创建分组
详情请参考 http://mp.weixin.qq.com/wiki/index.php?title=分组管理接口
:param name: 分组名字(30个字符以内)
:return: 返回的 JSON 数据包
"""
name = to_unicode(name)
return self.post(
url="https://api.weixin.qq.com/cgi-bin/groups/create",
data={"group": {"name": name}},
)
|
https://github.com/offu/WeRoBot/issues/37
|
In [1]: import six
In [2]: def to_text(value, encoding="utf-8"):
...: if isinstance(value, (six.string_types, six.binary_type)):
...: return value.decode(encoding)
...: if isinstance(value, int):
...: return six.text_type(value)
...: assert isinstance(value, six.text_type)
...: return value
...:
In [3]: print to_text(u'\u4e2d\u5348')
---------------------------------------------------------------------------
UnicodeEncodeError Traceback (most recent call last)
<ipython-input-3-9478a71004a0> in <module>()
----> 1 print to_text(u'\u4e2d\u5348')
<ipython-input-2-3b3748f845b1> in to_text(value, encoding)
1 def to_text(value, encoding="utf-8"):
2 if isinstance(value, (six.string_types, six.binary_type)):
----> 3 return value.decode(encoding)
4 if isinstance(value, int):
5 return six.text_type(value)
/root/.virtualenvs/wxPortal/lib/python2.7/encodings/utf_8.pyc in decode(input, errors)
14
15 def decode(input, errors='strict'):
---> 16 return codecs.utf_8_decode(input, errors, True)
17
18 class IncrementalEncoder(codecs.IncrementalEncoder):
UnicodeEncodeError: 'ascii' codec can't encode characters in position 0-1: ordinal not in range(128)
|
UnicodeEncodeError
|
def update_group(self, group_id, name):
"""
修改分组名
详情请参考 http://mp.weixin.qq.com/wiki/index.php?title=分组管理接口
:param group_id: 分组id,由微信分配
:param name: 分组名字(30个字符以内)
:return: 返回的 JSON 数据包
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/groups/update",
data={"group": {"id": int(group_id), "name": to_text(name)}},
)
|
def update_group(self, group_id, name):
"""
修改分组名
详情请参考 http://mp.weixin.qq.com/wiki/index.php?title=分组管理接口
:param group_id: 分组id,由微信分配
:param name: 分组名字(30个字符以内)
:return: 返回的 JSON 数据包
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/groups/update",
data={"group": {"id": int(group_id), "name": to_unicode(name)}},
)
|
https://github.com/offu/WeRoBot/issues/37
|
In [1]: import six
In [2]: def to_text(value, encoding="utf-8"):
...: if isinstance(value, (six.string_types, six.binary_type)):
...: return value.decode(encoding)
...: if isinstance(value, int):
...: return six.text_type(value)
...: assert isinstance(value, six.text_type)
...: return value
...:
In [3]: print to_text(u'\u4e2d\u5348')
---------------------------------------------------------------------------
UnicodeEncodeError Traceback (most recent call last)
<ipython-input-3-9478a71004a0> in <module>()
----> 1 print to_text(u'\u4e2d\u5348')
<ipython-input-2-3b3748f845b1> in to_text(value, encoding)
1 def to_text(value, encoding="utf-8"):
2 if isinstance(value, (six.string_types, six.binary_type)):
----> 3 return value.decode(encoding)
4 if isinstance(value, int):
5 return six.text_type(value)
/root/.virtualenvs/wxPortal/lib/python2.7/encodings/utf_8.pyc in decode(input, errors)
14
15 def decode(input, errors='strict'):
---> 16 return codecs.utf_8_decode(input, errors, True)
17
18 class IncrementalEncoder(codecs.IncrementalEncoder):
UnicodeEncodeError: 'ascii' codec can't encode characters in position 0-1: ordinal not in range(128)
|
UnicodeEncodeError
|
def to_text(value, encoding="utf-8"):
if isinstance(value, six.binary_type):
return value.decode(encoding)
if isinstance(value, six.integer_types):
return six.text_type(value)
assert isinstance(value, six.text_type)
return value
|
def to_text(value, encoding="utf-8"):
if isinstance(value, (six.string_types, six.binary_type)):
return value.decode(encoding)
if isinstance(value, int):
return six.text_type(value)
assert isinstance(value, six.text_type)
return value
|
https://github.com/offu/WeRoBot/issues/37
|
In [1]: import six
In [2]: def to_text(value, encoding="utf-8"):
...: if isinstance(value, (six.string_types, six.binary_type)):
...: return value.decode(encoding)
...: if isinstance(value, int):
...: return six.text_type(value)
...: assert isinstance(value, six.text_type)
...: return value
...:
In [3]: print to_text(u'\u4e2d\u5348')
---------------------------------------------------------------------------
UnicodeEncodeError Traceback (most recent call last)
<ipython-input-3-9478a71004a0> in <module>()
----> 1 print to_text(u'\u4e2d\u5348')
<ipython-input-2-3b3748f845b1> in to_text(value, encoding)
1 def to_text(value, encoding="utf-8"):
2 if isinstance(value, (six.string_types, six.binary_type)):
----> 3 return value.decode(encoding)
4 if isinstance(value, int):
5 return six.text_type(value)
/root/.virtualenvs/wxPortal/lib/python2.7/encodings/utf_8.pyc in decode(input, errors)
14
15 def decode(input, errors='strict'):
---> 16 return codecs.utf_8_decode(input, errors, True)
17
18 class IncrementalEncoder(codecs.IncrementalEncoder):
UnicodeEncodeError: 'ascii' codec can't encode characters in position 0-1: ordinal not in range(128)
|
UnicodeEncodeError
|
def to_binary(value, encoding="utf-8"):
if isinstance(value, six.text_type):
return value.encode(encoding)
if isinstance(value, six.integer_types):
return six.binary_type(value)
assert isinstance(value, six.binary_type)
return value
|
def to_binary(value, encoding="utf-8"):
if isinstance(value, six.text_type):
return value.encode(encoding)
if isinstance(value, int):
return six.binary_type(value)
assert isinstance(value, six.binary_type)
return value
|
https://github.com/offu/WeRoBot/issues/37
|
In [1]: import six
In [2]: def to_text(value, encoding="utf-8"):
...: if isinstance(value, (six.string_types, six.binary_type)):
...: return value.decode(encoding)
...: if isinstance(value, int):
...: return six.text_type(value)
...: assert isinstance(value, six.text_type)
...: return value
...:
In [3]: print to_text(u'\u4e2d\u5348')
---------------------------------------------------------------------------
UnicodeEncodeError Traceback (most recent call last)
<ipython-input-3-9478a71004a0> in <module>()
----> 1 print to_text(u'\u4e2d\u5348')
<ipython-input-2-3b3748f845b1> in to_text(value, encoding)
1 def to_text(value, encoding="utf-8"):
2 if isinstance(value, (six.string_types, six.binary_type)):
----> 3 return value.decode(encoding)
4 if isinstance(value, int):
5 return six.text_type(value)
/root/.virtualenvs/wxPortal/lib/python2.7/encodings/utf_8.pyc in decode(input, errors)
14
15 def decode(input, errors='strict'):
---> 16 return codecs.utf_8_decode(input, errors, True)
17
18 class IncrementalEncoder(codecs.IncrementalEncoder):
UnicodeEncodeError: 'ascii' codec can't encode characters in position 0-1: ordinal not in range(128)
|
UnicodeEncodeError
|
def format(self, record):
"""Prettify the log output, annotate with simulation time"""
msg = record.getMessage()
# Need to colour each line in case coloring is applied in the message
msg = "\n".join(
[
SimColourLogFormatter.loglevel2colour.get(record.levelno, "%s") % line
for line in msg.split("\n")
]
)
level = SimColourLogFormatter.loglevel2colour.get(
record.levelno, "%s"
) % record.levelname.ljust(_LEVEL_CHARS)
return self._format(level, record, msg, coloured=True)
|
def format(self, record):
"""Prettify the log output, annotate with simulation time"""
msg = record.getMessage()
# Need to colour each line in case coloring is applied in the message
msg = "\n".join(
[
SimColourLogFormatter.loglevel2colour[record.levelno] % line
for line in msg.split("\n")
]
)
level = SimColourLogFormatter.loglevel2colour[
record.levelno
] % record.levelname.ljust(_LEVEL_CHARS)
return self._format(level, record, msg, coloured=True)
|
https://github.com/cocotb/cocotb/issues/2362
|
Traceback (most recent call last):
File "/Users/raysalemi/opt/anaconda3/lib/python3.8/logging/__init__.py", line 1081, in emit
msg = self.format(record)
File "/Users/raysalemi/opt/anaconda3/lib/python3.8/logging/__init__.py", line 925, in format
return fmt.format(record)
File "/Users/raysalemi/opt/anaconda3/lib/python3.8/site-packages/cocotb/log.py", line 251, in format
msg = '\n'.join([SimColourLogFormatter.loglevel2colour[record.levelno] % line for line in msg.split('\n')])
File "/Users/raysalemi/opt/anaconda3/lib/python3.8/site-packages/cocotb/log.py", line 251, in <listcomp>
msg = '\n'.join([SimColourLogFormatter.loglevel2colour[record.levelno] % line for line in msg.split('\n')])
KeyError: 5
Call stack:
|
KeyError
|
def format(self, record):
"""Prettify the log output, annotate with simulation time"""
msg = record.getMessage()
level = record.levelname.ljust(_LEVEL_CHARS)
return self._format(level, record, msg)
|
def format(self, record):
"""Prettify the log output, annotate with simulation time"""
if record.args:
msg = record.msg % record.args
else:
msg = record.msg
msg = str(msg)
level = record.levelname.ljust(_LEVEL_CHARS)
return self._format(level, record, msg)
|
https://github.com/cocotb/cocotb/issues/1408
|
Traceback (most recent call last):
File "/usr/lib64/python3.7/logging/__init__.py", line 1034, in emit
msg = self.format(record)
File "/usr/lib64/python3.7/logging/__init__.py", line 880, in format
return fmt.format(record)
File "/home/philipp/.local/lib/python3.7/site-packages/cocotb/log.py", line 201, in format
msg = '\n'.join([SimColourLogFormatter.loglevel2colour[record.levelno] % line for line in msg.split('\n')])
AttributeError: 'BinaryValue' object has no attribute 'split'
Call stack:
File "/home/philipp/.local/lib/python3.7/site-packages/cocotb/scheduler.py", line 355, in react
self._event_loop(trigger)
File "/home/philipp/.local/lib/python3.7/site-packages/cocotb/scheduler.py", line 448, in _event_loop
self.schedule(coro, trigger=trigger)
File "/home/philipp/.local/lib/python3.7/site-packages/cocotb/scheduler.py", line 758, in schedule
result = coroutine._advance(send_outcome)
File "/home/philipp/.local/lib/python3.7/site-packages/cocotb/decorators.py", line 264, in _advance
return super(RunningTest, self)._advance(outcome)
File "/home/philipp/.local/lib/python3.7/site-packages/cocotb/decorators.py", line 146, in _advance
return outcome.send(self._coro)
File "/home/philipp/.local/lib/python3.7/site-packages/cocotb/outcomes.py", line 38, in send
return gen.send(self.value)
File "path/to/test_fifo.py", line 45, in test_fifo_manual
dut._log.info(dut.empty.value)
|
AttributeError
|
def format(self, record):
"""Prettify the log output, annotate with simulation time"""
msg = record.getMessage()
# Need to colour each line in case coloring is applied in the message
msg = "\n".join(
[
SimColourLogFormatter.loglevel2colour[record.levelno] % line
for line in msg.split("\n")
]
)
level = SimColourLogFormatter.loglevel2colour[
record.levelno
] % record.levelname.ljust(_LEVEL_CHARS)
return self._format(level, record, msg, coloured=True)
|
def format(self, record):
"""Prettify the log output, annotate with simulation time"""
if record.args:
msg = record.msg % record.args
else:
msg = record.msg
# Need to colour each line in case coloring is applied in the message
msg = "\n".join(
[
SimColourLogFormatter.loglevel2colour[record.levelno] % line
for line in msg.split("\n")
]
)
level = SimColourLogFormatter.loglevel2colour[
record.levelno
] % record.levelname.ljust(_LEVEL_CHARS)
return self._format(level, record, msg, coloured=True)
|
https://github.com/cocotb/cocotb/issues/1408
|
Traceback (most recent call last):
File "/usr/lib64/python3.7/logging/__init__.py", line 1034, in emit
msg = self.format(record)
File "/usr/lib64/python3.7/logging/__init__.py", line 880, in format
return fmt.format(record)
File "/home/philipp/.local/lib/python3.7/site-packages/cocotb/log.py", line 201, in format
msg = '\n'.join([SimColourLogFormatter.loglevel2colour[record.levelno] % line for line in msg.split('\n')])
AttributeError: 'BinaryValue' object has no attribute 'split'
Call stack:
File "/home/philipp/.local/lib/python3.7/site-packages/cocotb/scheduler.py", line 355, in react
self._event_loop(trigger)
File "/home/philipp/.local/lib/python3.7/site-packages/cocotb/scheduler.py", line 448, in _event_loop
self.schedule(coro, trigger=trigger)
File "/home/philipp/.local/lib/python3.7/site-packages/cocotb/scheduler.py", line 758, in schedule
result = coroutine._advance(send_outcome)
File "/home/philipp/.local/lib/python3.7/site-packages/cocotb/decorators.py", line 264, in _advance
return super(RunningTest, self)._advance(outcome)
File "/home/philipp/.local/lib/python3.7/site-packages/cocotb/decorators.py", line 146, in _advance
return outcome.send(self._coro)
File "/home/philipp/.local/lib/python3.7/site-packages/cocotb/outcomes.py", line 38, in send
return gen.send(self.value)
File "path/to/test_fifo.py", line 45, in test_fifo_manual
dut._log.info(dut.empty.value)
|
AttributeError
|
def build_edges(nodes: List[ManifestNode]):
"""Build the forward and backward edges on the given list of ParsedNodes
and return them as two separate dictionaries, each mapping unique IDs to
lists of edges.
"""
backward_edges: Dict[str, List[str]] = {}
# pre-populate the forward edge dict for simplicity
forward_edges: Dict[str, List[str]] = {n.unique_id: [] for n in nodes}
for node in nodes:
backward_edges[node.unique_id] = node.depends_on_nodes[:]
for unique_id in node.depends_on_nodes:
if unique_id in forward_edges.keys():
forward_edges[unique_id].append(node.unique_id)
return _sort_values(forward_edges), _sort_values(backward_edges)
|
def build_edges(nodes: List[ManifestNode]):
"""Build the forward and backward edges on the given list of ParsedNodes
and return them as two separate dictionaries, each mapping unique IDs to
lists of edges.
"""
backward_edges: Dict[str, List[str]] = {}
# pre-populate the forward edge dict for simplicity
forward_edges: Dict[str, List[str]] = {n.unique_id: [] for n in nodes}
for node in nodes:
backward_edges[node.unique_id] = node.depends_on_nodes[:]
for unique_id in node.depends_on_nodes:
forward_edges[unique_id].append(node.unique_id)
return _sort_values(forward_edges), _sort_values(backward_edges)
|
https://github.com/fishtown-analytics/dbt/issues/2875
|
~/git/jaffle_shop(bug/defer-with-filters) » dbt -d run --models config.materialized:table,state:modified+ --defer --state ./deferring_target 2 ↵ naveen@Naveens-MacBook-Pro
2020-11-09 22:41:10.593567 (MainThread): Running with dbt=0.18.1
2020-11-09 22:41:12.097193 (MainThread): running dbt with arguments Namespace(cls=<class 'dbt.task.run.RunTask'>, debug=True, defer=True, exclude=None, fail_fast=False, full_refresh=False, log_cache_events=False, log_format='default', models=['config.materialized:table,state:modified+'], partial_parse=None, profile=None, profiles_dir='/Users/naveen/.dbt', project_dir=None, record_timing_info=None, rpc_method='run', selector_name=None, single_threaded=False, state=PosixPath('deferring_target'), strict=False, target=None, test_new_parser=False, threads=None, use_cache=True, use_colors=None, vars='{}', version_check=True, warn_error=False, which='run', write_json=True)
2020-11-09 22:41:12.098467 (MainThread): Tracking: tracking
2020-11-09 22:41:12.112634 (MainThread): Sending event: {'category': 'dbt', 'action': 'invocation', 'label': 'start', 'context': [<snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10f0d4a60>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10f0daf10>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10f0da400>]}
2020-11-09 22:41:12.144638 (MainThread): Partial parsing not enabled
2020-11-09 22:41:12.146188 (MainThread): Parsing macros/catalog.sql
2020-11-09 22:41:12.151603 (MainThread): Parsing macros/relations.sql
2020-11-09 22:41:12.154273 (MainThread): Parsing macros/adapters.sql
2020-11-09 22:41:12.183946 (MainThread): Parsing macros/materializations/snapshot_merge.sql
2020-11-09 22:41:12.188553 (MainThread): Parsing macros/core.sql
2020-11-09 22:41:12.193896 (MainThread): Parsing macros/materializations/helpers.sql
2020-11-09 22:41:12.205495 (MainThread): Parsing macros/materializations/snapshot/snapshot_merge.sql
2020-11-09 22:41:12.208681 (MainThread): Parsing macros/materializations/snapshot/strategies.sql
2020-11-09 22:41:12.228941 (MainThread): Parsing macros/materializations/snapshot/snapshot.sql
2020-11-09 22:41:12.271125 (MainThread): Parsing macros/materializations/seed/seed.sql
2020-11-09 22:41:12.299178 (MainThread): Parsing macros/materializations/incremental/helpers.sql
2020-11-09 22:41:12.301574 (MainThread): Parsing macros/materializations/incremental/incremental.sql
2020-11-09 22:41:12.318498 (MainThread): Parsing macros/materializations/common/merge.sql
2020-11-09 22:41:12.351718 (MainThread): Parsing macros/materializations/table/table.sql
2020-11-09 22:41:12.360024 (MainThread): Parsing macros/materializations/view/view.sql
2020-11-09 22:41:12.368574 (MainThread): Parsing macros/materializations/view/create_or_replace_view.sql
2020-11-09 22:41:12.375325 (MainThread): Parsing macros/etc/get_custom_alias.sql
2020-11-09 22:41:12.376571 (MainThread): Parsing macros/etc/query.sql
2020-11-09 22:41:12.378245 (MainThread): Parsing macros/etc/is_incremental.sql
2020-11-09 22:41:12.381212 (MainThread): Parsing macros/etc/datetime.sql
2020-11-09 22:41:12.393173 (MainThread): Parsing macros/etc/get_custom_schema.sql
2020-11-09 22:41:12.395858 (MainThread): Parsing macros/etc/get_custom_database.sql
2020-11-09 22:41:12.398614 (MainThread): Parsing macros/adapters/common.sql
2020-11-09 22:41:12.478857 (MainThread): Parsing macros/schema_tests/relationships.sql
2020-11-09 22:41:12.481771 (MainThread): Parsing macros/schema_tests/not_null.sql
2020-11-09 22:41:12.483674 (MainThread): Parsing macros/schema_tests/unique.sql
2020-11-09 22:41:12.487213 (MainThread): Parsing macros/schema_tests/accepted_values.sql
2020-11-09 22:41:12.498646 (MainThread): * Deprecation Warning: dbt v0.17.0 introduces a new config format for the
dbt_project.yml file. Support for the existing version 1 format will be removed
in a future release of dbt. The following packages are currently configured with
config version 1:
- jaffle_shop
For upgrading instructions, consult the documentation:
https://docs.getdbt.com/docs/guides/migration-guide/upgrading-to-0-17-0
2020-11-09 22:41:12.498840 (MainThread): Sending event: {'category': 'dbt', 'action': 'deprecation', 'label': '909baabc-7612-4869-8c50-a18d9cfa843b', 'property_': 'warn', 'context': [<snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10f108b80>]}
2020-11-09 22:41:12.499374 (MainThread): Partial parsing not enabled
2020-11-09 22:41:12.530387 (MainThread): Acquiring new postgres connection "model.jaffle_shop.stg_customers".
2020-11-09 22:41:12.549148 (MainThread): Acquiring new postgres connection "model.jaffle_shop.stg_payments".
2020-11-09 22:41:12.559049 (MainThread): Acquiring new postgres connection "model.jaffle_shop.stg_orders".
2020-11-09 22:41:12.570599 (MainThread): Acquiring new postgres connection "model.jaffle_shop.dim_customers".
2020-11-09 22:41:12.584907 (MainThread): Acquiring new postgres connection "model.jaffle_shop.fct_orders".
2020-11-09 22:41:12.594309 (MainThread): Acquiring new postgres connection "model.jaffle_shop.order_payments".
2020-11-09 22:41:12.604708 (MainThread): Acquiring new postgres connection "model.jaffle_shop.customer_payments".
2020-11-09 22:41:12.613672 (MainThread): Acquiring new postgres connection "model.jaffle_shop.customer_orders".
2020-11-09 22:41:13.141491 (MainThread): Found 8 models, 20 tests, 0 snapshots, 0 analyses, 138 macros, 0 operations, 3 seed files, 0 sources
2020-11-09 22:41:13.144965 (MainThread):
2020-11-09 22:41:13.145323 (MainThread): Merged 6 items from state (sample: ['seed.jaffle_shop.raw_orders', 'model.jaffle_shop.stg_payments', 'seed.jaffle_shop.raw_payments', 'model.jaffle_shop.stg_orders', 'seed.jaffle_shop.raw_customers'])
2020-11-09 22:41:13.145489 (MainThread): Connection 'model.jaffle_shop.customer_orders' was properly closed.
2020-11-09 22:41:13.145797 (MainThread): Sending event: {'category': 'dbt', 'action': 'invocation', 'label': 'end', 'context': [<snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10f20f100>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10f20ffd0>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10f20ff70>]}
2020-11-09 22:41:13.146030 (MainThread): Flushing usage events
2020-11-09 22:41:13.338030 (MainThread): Encountered an error:
2020-11-09 22:41:13.338333 (MainThread): 'source.jaffle_shop.public.source1'
2020-11-09 22:41:13.343158 (MainThread): Traceback (most recent call last):
File "/Users/naveen/.pyenv/versions/3.8.2/envs/dbt-core/lib/python3.8/site-packages/dbt/main.py", line 124, in main
results, succeeded = handle_and_check(args)
File "/Users/naveen/.pyenv/versions/3.8.2/envs/dbt-core/lib/python3.8/site-packages/dbt/main.py", line 202, in handle_and_check
task, res = run_from_args(parsed)
File "/Users/naveen/.pyenv/versions/3.8.2/envs/dbt-core/lib/python3.8/site-packages/dbt/main.py", line 255, in run_from_args
results = task.run()
File "/Users/naveen/.pyenv/versions/3.8.2/envs/dbt-core/lib/python3.8/site-packages/dbt/task/runnable.py", line 419, in run
result = self.execute_with_hooks(selected_uids)
File "/Users/naveen/.pyenv/versions/3.8.2/envs/dbt-core/lib/python3.8/site-packages/dbt/task/runnable.py", line 379, in execute_with_hooks
self.before_run(adapter, selected_uids)
File "/Users/naveen/.pyenv/versions/3.8.2/envs/dbt-core/lib/python3.8/site-packages/dbt/task/run.py", line 392, in before_run
self.defer_to_manifest(selected_uids)
File "/Users/naveen/.pyenv/versions/3.8.2/envs/dbt-core/lib/python3.8/site-packages/dbt/task/run.py", line 389, in defer_to_manifest
self.write_manifest()
File "/Users/naveen/.pyenv/versions/3.8.2/envs/dbt-core/lib/python3.8/site-packages/dbt/task/runnable.py", line 62, in write_manifest
self.manifest.write(path)
File "/Users/naveen/.pyenv/versions/3.8.2/envs/dbt-core/lib/python3.8/site-packages/dbt/contracts/graph/manifest.py", line 762, in write
self.writable_manifest().write(path)
File "/Users/naveen/.pyenv/versions/3.8.2/envs/dbt-core/lib/python3.8/site-packages/dbt/contracts/graph/manifest.py", line 741, in writable_manifest
forward_edges, backward_edges = build_edges(edge_members)
File "/Users/naveen/.pyenv/versions/3.8.2/envs/dbt-core/lib/python3.8/site-packages/dbt/contracts/graph/manifest.py", line 231, in build_edges
forward_edges[unique_id].append(node.unique_id)
KeyError: 'source.jaffle_shop.public.source1'
|
KeyError
|
def fetch_cluster_credentials(
cls, db_user, db_name, cluster_id, iam_profile, duration_s, autocreate, db_groups
):
"""Fetches temporary login credentials from AWS. The specified user
must already exist in the database, or else an error will occur"""
if iam_profile is None:
session = boto3.Session()
boto_client = session.client("redshift")
else:
logger.debug(
"Connecting to Redshift using 'IAM'" + f"with profile {iam_profile}"
)
boto_session = boto3.Session(profile_name=iam_profile)
boto_client = boto_session.client("redshift")
try:
return boto_client.get_cluster_credentials(
DbUser=db_user,
DbName=db_name,
ClusterIdentifier=cluster_id,
DurationSeconds=duration_s,
AutoCreate=autocreate,
DbGroups=db_groups,
)
except boto_client.exceptions.ClientError as e:
raise dbt.exceptions.FailedToConnectException(
"Unable to get temporary Redshift cluster credentials: {}".format(e)
)
|
def fetch_cluster_credentials(
cls, db_user, db_name, cluster_id, iam_profile, duration_s, autocreate, db_groups
):
"""Fetches temporary login credentials from AWS. The specified user
must already exist in the database, or else an error will occur"""
if iam_profile is None:
boto_client = boto3.client("redshift")
else:
logger.debug(
"Connecting to Redshift using 'IAM'" + f"with profile {iam_profile}"
)
boto_session = boto3.Session(profile_name=iam_profile)
boto_client = boto_session.client("redshift")
try:
return boto_client.get_cluster_credentials(
DbUser=db_user,
DbName=db_name,
ClusterIdentifier=cluster_id,
DurationSeconds=duration_s,
AutoCreate=autocreate,
DbGroups=db_groups,
)
except boto_client.exceptions.ClientError as e:
raise dbt.exceptions.FailedToConnectException(
"Unable to get temporary Redshift cluster credentials: {}".format(e)
)
|
https://github.com/fishtown-analytics/dbt/issues/2756
|
2020-09-14 11:15:23.743840 (MainThread): Traceback (most recent call last):
File "/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/postgres/connections.py", line 46, in exception_handler
yield
File "/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/sql/connections.py", line 76, in add_query
cursor = connection.handle.cursor()
File "/Users/xxx/venv/lib/python3.7/site-packages/dbt/contracts/connection.py", line 69, in handle
self._handle.resolve(self)
File "/Users/xxx/venv/lib/python3.7/site-packages/dbt/contracts/connection.py", line 90, in resolve
return self.opener(connection)
File "/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/postgres/connections.py", line 77, in open
credentials = cls.get_credentials(connection.credentials)
File "/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/redshift/connections.py", line 152, in get_credentials
return cls.get_tmp_iam_cluster_credentials(credentials)
File "/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/redshift/connections.py", line 128, in get_tmp_iam_cluster_credentials
credentials.db_groups,
File "/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/redshift/connections.py", line 93, in fetch_cluster_credentials
boto_client = boto3.client('redshift')
File "/Users/xxx/venv/lib/python3.7/site-packages/boto3/__init__.py", line 91, in client
return _get_default_session().client(*args, **kwargs)
File "/Users/xxx/venv/lib/python3.7/site-packages/boto3/session.py", line 263, in client
aws_session_token=aws_session_token, config=config)
File "/Users/xxx/venv/lib/python3.7/site-packages/botocore/session.py", line 828, in create_client
endpoint_resolver = self._get_internal_component('endpoint_resolver')
File "/Users/xxx/venv/lib/python3.7/site-packages/botocore/session.py", line 695, in _get_internal_component
return self._internal_components.get_component(name)
File "/Users/xxx/venv/lib/python3.7/site-packages/botocore/session.py", line 907, in get_component
del self._deferred[name]
KeyError: 'endpoint_resolver'
|
KeyError
|
def get_result_from_cursor(cls, cursor: Any) -> agate.Table:
data: List[Any] = []
column_names: List[str] = []
if cursor.description is not None:
column_names = [col[0] for col in cursor.description]
rows = cursor.fetchall()
data = cls.process_results(column_names, rows)
return dbt.clients.agate_helper.table_from_data_flat(data, column_names)
|
def get_result_from_cursor(cls, cursor: Any) -> agate.Table:
data: List[Any] = []
column_names: List[str] = []
if cursor.description is not None:
column_names = [col[0] for col in cursor.description]
rows = cursor.fetchall()
data = cls.process_results(column_names, rows)
return dbt.clients.agate_helper.table_from_data(data, column_names)
|
https://github.com/fishtown-analytics/dbt/issues/2337
|
Traceback (most recent call last):
File \"/Users/drew/fishtown/dbt/core/dbt/node_runners.py\", line 227, in safe_run
result = self.compile_and_execute(manifest, ctx)
File \"/Users/drew/fishtown/dbt/core/dbt/node_runners.py\", line 170, in compile_and_execute
result = self.run(ctx.node, manifest)
File \"/Users/drew/fishtown/dbt/core/dbt/node_runners.py\", line 272, in run
return self.execute(compiled_node, manifest)
File \"/Users/drew/fishtown/dbt/core/dbt/rpc/node_runners.py\", line 86, in execute
compiled_node.injected_sql, fetch=True
File \"/Users/drew/fishtown/dbt/core/dbt/adapters/base/impl.py\", line 227, in execute
fetch=fetch
File \"/Users/drew/fishtown/dbt/core/dbt/adapters/sql/connections.py\", line 119, in execute
table = self.get_result_from_cursor(cursor)
File \"/Users/drew/fishtown/dbt/core/dbt/adapters/sql/connections.py\", line 110, in get_result_from_cursor
return dbt.clients.agate_helper.table_from_data(data, column_names)
File \"/Users/drew/fishtown/dbt/core/dbt/clients/agate_helper.py\", line 84, in table_from_data
return table.select(column_names)
File \"/Users/drew/fishtown/dbt/env/lib/python3.7/site-packages/agate/table/select.py\", line 21, in select
indexes = tuple(self._column_names.index(k) for k in key)
File \"/Users/drew/fishtown/dbt/env/lib/python3.7/site-packages/agate/table/select.py\", line 21, in <genexpr>
indexes = tuple(self._column_names.index(k) for k in key)
ValueError: tuple.index(x): x not in tuple
|
ValueError
|
def execute_macro(
self,
macro_name: str,
manifest: Optional[Manifest] = None,
project: Optional[str] = None,
context_override: Optional[Dict[str, Any]] = None,
kwargs: Dict[str, Any] = None,
release: bool = False,
text_only_columns: Optional[Iterable[str]] = None,
) -> agate.Table:
"""Look macro_name up in the manifest and execute its results.
:param macro_name: The name of the macro to execute.
:param manifest: The manifest to use for generating the base macro
execution context. If none is provided, use the internal manifest.
:param project: The name of the project to search in, or None for the
first match.
:param context_override: An optional dict to update() the macro
execution context.
:param kwargs: An optional dict of keyword args used to pass to the
macro.
:param release: If True, release the connection after executing.
"""
if kwargs is None:
kwargs = {}
if context_override is None:
context_override = {}
if manifest is None:
manifest = self._internal_manifest
macro = manifest.find_macro_by_name(macro_name, self.config.project_name, project)
if macro is None:
if project is None:
package_name = "any package"
else:
package_name = 'the "{}" package'.format(project)
raise RuntimeException(
'dbt could not find a macro with the name "{}" in {}'.format(
macro_name, package_name
)
)
# This causes a reference cycle, as generate_runtime_macro()
# ends up calling get_adapter, so the import has to be here.
from dbt.context.providers import generate_runtime_macro
macro_context = generate_runtime_macro(
macro=macro, config=self.config, manifest=manifest, package_name=project
)
macro_context.update(context_override)
macro_function = MacroGenerator(macro, macro_context)
with self.connections.exception_handler(f"macro {macro_name}"):
try:
result = macro_function(**kwargs)
finally:
if release:
self.release_connection()
return result
|
def execute_macro(
self,
macro_name: str,
manifest: Optional[Manifest] = None,
project: Optional[str] = None,
context_override: Optional[Dict[str, Any]] = None,
kwargs: Dict[str, Any] = None,
release: bool = False,
) -> agate.Table:
"""Look macro_name up in the manifest and execute its results.
:param macro_name: The name of the macro to execute.
:param manifest: The manifest to use for generating the base macro
execution context. If none is provided, use the internal manifest.
:param project: The name of the project to search in, or None for the
first match.
:param context_override: An optional dict to update() the macro
execution context.
:param kwargs: An optional dict of keyword args used to pass to the
macro.
:param release: If True, release the connection after executing.
"""
if kwargs is None:
kwargs = {}
if context_override is None:
context_override = {}
if manifest is None:
manifest = self._internal_manifest
macro = manifest.find_macro_by_name(macro_name, self.config.project_name, project)
if macro is None:
if project is None:
package_name = "any package"
else:
package_name = 'the "{}" package'.format(project)
raise RuntimeException(
'dbt could not find a macro with the name "{}" in {}'.format(
macro_name, package_name
)
)
# This causes a reference cycle, as generate_runtime_macro()
# ends up calling get_adapter, so the import has to be here.
from dbt.context.providers import generate_runtime_macro
macro_context = generate_runtime_macro(
macro=macro, config=self.config, manifest=manifest, package_name=project
)
macro_context.update(context_override)
macro_function = MacroGenerator(macro, macro_context)
with self.connections.exception_handler(f"macro {macro_name}"):
try:
result = macro_function(**kwargs)
finally:
if release:
self.release_connection()
return result
|
https://github.com/fishtown-analytics/dbt/issues/2175
|
2020-03-02 09:10:33,888249 (MainThread): 'decimal.Decimal' object has no attribute 'lower'
2020-03-02 09:10:33,892358 (MainThread): Traceback (most recent call last):
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/main.py", line 80, in main
results, succeeded = handle_and_check(args)
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/main.py", line 158, in handle_and_check
task, res = run_from_args(parsed)
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/main.py", line 210, in run_from_args
results = task.run()
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/task/generate.py", line 201, in run
catalog_table = adapter.get_catalog(self.manifest)
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/adapters/base/impl.py", line 1016, in get_catalog
results = self._catalog_filter_table(table, manifest)
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/adapters/bigquery/impl.py", line 493, in _catalog_filter_table
return super()._catalog_filter_table(table, manifest)
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/adapters/base/impl.py", line 998, in _catalog_filter_table
return table.where(_catalog_filter_schemas(manifest))
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/agate/table/where.py", line 25, in where
if test(row):
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/adapters/base/impl.py", line 82, in test
return (table_database.lower(), table_schema.lower()) in schemas
AttributeError: 'decimal.Decimal' object has no attribute 'lower'
|
AttributeError
|
def _catalog_filter_table(cls, table: agate.Table, manifest: Manifest) -> agate.Table:
"""Filter the table as appropriate for catalog entries. Subclasses can
override this to change filtering rules on a per-adapter basis.
"""
# force database + schema to be strings
table = table_from_rows(
table.rows,
table.column_names,
text_only_columns=["table_database", "table_schema", "table_name"],
)
return table.where(_catalog_filter_schemas(manifest))
|
def _catalog_filter_table(cls, table: agate.Table, manifest: Manifest) -> agate.Table:
"""Filter the table as appropriate for catalog entries. Subclasses can
override this to change filtering rules on a per-adapter basis.
"""
return table.where(_catalog_filter_schemas(manifest))
|
https://github.com/fishtown-analytics/dbt/issues/2175
|
2020-03-02 09:10:33,888249 (MainThread): 'decimal.Decimal' object has no attribute 'lower'
2020-03-02 09:10:33,892358 (MainThread): Traceback (most recent call last):
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/main.py", line 80, in main
results, succeeded = handle_and_check(args)
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/main.py", line 158, in handle_and_check
task, res = run_from_args(parsed)
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/main.py", line 210, in run_from_args
results = task.run()
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/task/generate.py", line 201, in run
catalog_table = adapter.get_catalog(self.manifest)
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/adapters/base/impl.py", line 1016, in get_catalog
results = self._catalog_filter_table(table, manifest)
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/adapters/bigquery/impl.py", line 493, in _catalog_filter_table
return super()._catalog_filter_table(table, manifest)
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/adapters/base/impl.py", line 998, in _catalog_filter_table
return table.where(_catalog_filter_schemas(manifest))
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/agate/table/where.py", line 25, in where
if test(row):
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/adapters/base/impl.py", line 82, in test
return (table_database.lower(), table_schema.lower()) in schemas
AttributeError: 'decimal.Decimal' object has no attribute 'lower'
|
AttributeError
|
def table_from_data(data, column_names: Iterable[str]) -> agate.Table:
"Convert list of dictionaries into an Agate table"
# The agate table is generated from a list of dicts, so the column order
# from `data` is not preserved. We can use `select` to reorder the columns
#
# If there is no data, create an empty table with the specified columns
if len(data) == 0:
return agate.Table([], column_names=column_names)
else:
table = agate.Table.from_object(data, column_types=DEFAULT_TYPE_TESTER)
return table.select(column_names)
|
def table_from_data(data, column_names):
"Convert list of dictionaries into an Agate table"
# The agate table is generated from a list of dicts, so the column order
# from `data` is not preserved. We can use `select` to reorder the columns
#
# If there is no data, create an empty table with the specified columns
if len(data) == 0:
return agate.Table([], column_names=column_names)
else:
table = agate.Table.from_object(data, column_types=DEFAULT_TYPE_TESTER)
return table.select(column_names)
|
https://github.com/fishtown-analytics/dbt/issues/2175
|
2020-03-02 09:10:33,888249 (MainThread): 'decimal.Decimal' object has no attribute 'lower'
2020-03-02 09:10:33,892358 (MainThread): Traceback (most recent call last):
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/main.py", line 80, in main
results, succeeded = handle_and_check(args)
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/main.py", line 158, in handle_and_check
task, res = run_from_args(parsed)
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/main.py", line 210, in run_from_args
results = task.run()
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/task/generate.py", line 201, in run
catalog_table = adapter.get_catalog(self.manifest)
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/adapters/base/impl.py", line 1016, in get_catalog
results = self._catalog_filter_table(table, manifest)
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/adapters/bigquery/impl.py", line 493, in _catalog_filter_table
return super()._catalog_filter_table(table, manifest)
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/adapters/base/impl.py", line 998, in _catalog_filter_table
return table.where(_catalog_filter_schemas(manifest))
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/agate/table/where.py", line 25, in where
if test(row):
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/adapters/base/impl.py", line 82, in test
return (table_database.lower(), table_schema.lower()) in schemas
AttributeError: 'decimal.Decimal' object has no attribute 'lower'
|
AttributeError
|
def table_from_data_flat(data, column_names: Iterable[str]) -> agate.Table:
"Convert list of dictionaries into an Agate table"
rows = []
for _row in data:
row = []
for value in list(_row.values()):
if isinstance(value, (dict, list, tuple)):
row.append(json.dumps(value))
else:
row.append(value)
rows.append(row)
return table_from_rows(rows=rows, column_names=column_names)
|
def table_from_data_flat(data, column_names):
"Convert list of dictionaries into an Agate table"
rows = []
for _row in data:
row = []
for value in list(_row.values()):
if isinstance(value, (dict, list, tuple)):
row.append(json.dumps(value))
else:
row.append(value)
rows.append(row)
return agate.Table(rows, column_names, column_types=DEFAULT_TYPE_TESTER)
|
https://github.com/fishtown-analytics/dbt/issues/2175
|
2020-03-02 09:10:33,888249 (MainThread): 'decimal.Decimal' object has no attribute 'lower'
2020-03-02 09:10:33,892358 (MainThread): Traceback (most recent call last):
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/main.py", line 80, in main
results, succeeded = handle_and_check(args)
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/main.py", line 158, in handle_and_check
task, res = run_from_args(parsed)
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/main.py", line 210, in run_from_args
results = task.run()
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/task/generate.py", line 201, in run
catalog_table = adapter.get_catalog(self.manifest)
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/adapters/base/impl.py", line 1016, in get_catalog
results = self._catalog_filter_table(table, manifest)
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/adapters/bigquery/impl.py", line 493, in _catalog_filter_table
return super()._catalog_filter_table(table, manifest)
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/adapters/base/impl.py", line 998, in _catalog_filter_table
return table.where(_catalog_filter_schemas(manifest))
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/agate/table/where.py", line 25, in where
if test(row):
File "/usr/local/Cellar/dbt/0.15.2_1/libexec/lib/python3.7/site-packages/dbt/adapters/base/impl.py", line 82, in test
return (table_database.lower(), table_schema.lower()) in schemas
AttributeError: 'decimal.Decimal' object has no attribute 'lower'
|
AttributeError
|
def run(self) -> CatalogResults:
compile_results = None
if self.args.compile:
compile_results = CompileTask.run(self)
if any(r.error is not None for r in compile_results):
dbt.ui.printer.print_timestamped_line(
"compile failed, cannot generate docs"
)
return CatalogResults(
nodes={},
generated_at=datetime.utcnow(),
errors=None,
_compile_results=compile_results,
)
else:
self.manifest = get_full_manifest(self.config)
shutil.copyfile(
DOCS_INDEX_FILE_PATH, os.path.join(self.config.target_path, "index.html")
)
if self.manifest is None:
raise InternalException("self.manifest was None in run!")
adapter = get_adapter(self.config)
with adapter.connection_named("generate_catalog"):
dbt.ui.printer.print_timestamped_line("Building catalog")
catalog_table, exceptions = adapter.get_catalog(self.manifest)
catalog_data: List[PrimitiveDict] = [
dict(zip(catalog_table.column_names, map(_coerce_decimal, row)))
for row in catalog_table
]
catalog = Catalog(catalog_data)
errors: Optional[List[str]] = None
if exceptions:
errors = [str(e) for e in exceptions]
results = self.get_catalog_results(
nodes=catalog.make_unique_id_map(self.manifest),
generated_at=datetime.utcnow(),
compile_results=compile_results,
errors=errors,
)
path = os.path.join(self.config.target_path, CATALOG_FILENAME)
results.write(path)
if self.args.compile:
write_manifest(self.config, self.manifest)
if exceptions:
logger.error(
"dbt encountered {} failure{} while writing the catalog".format(
len(exceptions), (len(exceptions) != 1) * "s"
)
)
dbt.ui.printer.print_timestamped_line(
"Catalog written to {}".format(os.path.abspath(path))
)
return results
|
def run(self):
compile_results = None
if self.args.compile:
compile_results = CompileTask.run(self)
if any(r.error is not None for r in compile_results):
dbt.ui.printer.print_timestamped_line(
"compile failed, cannot generate docs"
)
return CatalogResults({}, datetime.utcnow(), compile_results, None)
shutil.copyfile(
DOCS_INDEX_FILE_PATH, os.path.join(self.config.target_path, "index.html")
)
if self.manifest is None:
raise InternalException("self.manifest was None in run!")
adapter = get_adapter(self.config)
with adapter.connection_named("generate_catalog"):
dbt.ui.printer.print_timestamped_line("Building catalog")
catalog_table, exceptions = adapter.get_catalog(self.manifest)
catalog_data: List[PrimitiveDict] = [
dict(zip(catalog_table.column_names, map(_coerce_decimal, row)))
for row in catalog_table
]
catalog = Catalog(catalog_data)
errors: Optional[List[str]] = None
if exceptions:
errors = [str(e) for e in exceptions]
results = self.get_catalog_results(
nodes=catalog.make_unique_id_map(self.manifest),
generated_at=datetime.utcnow(),
compile_results=compile_results,
errors=errors,
)
path = os.path.join(self.config.target_path, CATALOG_FILENAME)
results.write(path)
write_manifest(self.config, self.manifest)
if exceptions:
logger.error(
"dbt encountered {} failure{} while writing the catalog".format(
len(exceptions), (len(exceptions) != 1) * "s"
)
)
dbt.ui.printer.print_timestamped_line(
"Catalog written to {}".format(os.path.abspath(path))
)
return results
|
https://github.com/fishtown-analytics/dbt/issues/2090
|
2020-02-04 02:04:58,798143 (MainThread): Traceback (most recent call last):
File "/Users/drew/fishtown/dbt/core/dbt/main.py", line 81, in main
results, succeeded = handle_and_check(args)
File "/Users/drew/fishtown/dbt/core/dbt/main.py", line 159, in handle_and_check
task, res = run_from_args(parsed)
File "/Users/drew/fishtown/dbt/core/dbt/main.py", line 212, in run_from_args
results = task.run()
File "/Users/drew/fishtown/dbt/core/dbt/task/generate.py", line 208, in run
'self.manifest was None in run!'
dbt.exceptions.InternalException: self.manifest was None in run!
|
dbt.exceptions.InternalException
|
def interpret_results(self, results: Optional[CatalogResults]) -> bool:
if results is None:
return False
if results.errors:
return False
compile_results = results._compile_results
if compile_results is None:
return True
return super().interpret_results(compile_results)
|
def interpret_results(self, results):
if results.errors:
return False
compile_results = results._compile_results
if compile_results is None:
return True
return super().interpret_results(compile_results)
|
https://github.com/fishtown-analytics/dbt/issues/2090
|
2020-02-04 02:04:58,798143 (MainThread): Traceback (most recent call last):
File "/Users/drew/fishtown/dbt/core/dbt/main.py", line 81, in main
results, succeeded = handle_and_check(args)
File "/Users/drew/fishtown/dbt/core/dbt/main.py", line 159, in handle_and_check
task, res = run_from_args(parsed)
File "/Users/drew/fishtown/dbt/core/dbt/main.py", line 212, in run_from_args
results = task.run()
File "/Users/drew/fishtown/dbt/core/dbt/task/generate.py", line 208, in run
'self.manifest was None in run!'
dbt.exceptions.InternalException: self.manifest was None in run!
|
dbt.exceptions.InternalException
|
def find_blocks(self, allowed_blocks=None, collect_raw_data=True):
"""Find all top-level blocks in the data."""
if allowed_blocks is None:
allowed_blocks = {"snapshot", "macro", "materialization", "docs"}
for tag in self.tag_parser.find_tags():
if tag.block_type_name in _CONTROL_FLOW_TAGS:
self.stack.append(tag.block_type_name)
elif tag.block_type_name in _CONTROL_FLOW_END_TAGS:
found = None
if self.stack:
found = self.stack.pop()
else:
expected = _CONTROL_FLOW_END_TAGS[tag.block_type_name]
dbt.exceptions.raise_compiler_error(
(
"Got an unexpected control flow end tag, got {} but "
"never saw a preceeding {} (@ {})"
).format(tag.block_type_name, expected, tag.start)
)
expected = _CONTROL_FLOW_TAGS[found]
if expected != tag.block_type_name:
dbt.exceptions.raise_compiler_error(
(
"Got an unexpected control flow end tag, got {} but "
"expected {} next (@ {})"
).format(tag.block_type_name, expected, tag.start)
)
if tag.block_type_name in allowed_blocks:
if self.stack:
dbt.exceptions.raise_compiler_error(
(
"Got a block definition inside control flow at {}. "
"All dbt block definitions must be at the top level"
).format(tag.start)
)
if self.current is not None:
dbt.exceptions.raise_compiler_error(
duplicate_tags.format(outer=self.current, inner=tag)
)
if collect_raw_data:
raw_data = self.data[self.last_position : tag.start]
self.last_position = tag.start
if raw_data:
yield BlockData(raw_data)
self.current = tag
elif self.is_current_end(tag):
self.last_position = tag.end
yield BlockTag(
block_type_name=self.current.block_type_name,
block_name=self.current.block_name,
contents=self.data[self.current.end : tag.start],
full_block=self.data[self.current.start : tag.end],
)
self.current = None
if self.current:
linecount = self.data[: self.current.end].count("\n") + 1
dbt.exceptions.raise_compiler_error(
(
"Reached EOF without finding a close tag for {} (searched from line {})"
).format(self.current.block_type_name, linecount)
)
if collect_raw_data:
raw_data = self.data[self.last_position :]
if raw_data:
yield BlockData(raw_data)
|
def find_blocks(self, allowed_blocks=None, collect_raw_data=True):
"""Find all top-level blocks in the data."""
if allowed_blocks is None:
allowed_blocks = {"snapshot", "macro", "materialization", "docs"}
for tag in self.tag_parser.find_tags():
if tag.block_type_name in _CONTROL_FLOW_TAGS:
self.stack.append(tag.block_type_name)
elif tag.block_type_name in _CONTROL_FLOW_END_TAGS:
found = None
if self.stack:
found = self.stack.pop()
else:
expected = _CONTROL_FLOW_END_TAGS[tag.block_type_name]
dbt.exceptions.raise_compiler_error(
(
"Got an unexpected control flow end tag, got {} but "
"never saw a preceeding {} (@ {})"
).format(tag.block_type_name, expected, tag.start)
)
expected = _CONTROL_FLOW_TAGS[found]
if expected != tag.block_type_name:
dbt.exceptions.raise_compiler_error(
(
"Got an unexpected control flow end tag, got {} but "
"expected {} next (@ {})"
).format(tag.block_type_name, expected, tag.start)
)
if tag.block_type_name in allowed_blocks:
if self.stack:
dbt.exceptions.raise_compiler_error(
(
"Got a block definition inside control flow at {}. "
"All dbt block definitions must be at the top level"
).format(tag.start)
)
if self.current is not None:
dbt.exceptions.raise_compiler_error(
duplicate_tags.format(outer=self.current, inner=tag)
)
if collect_raw_data:
raw_data = self.data[self.last_position : tag.start]
self.last_position = tag.start
if raw_data:
yield BlockData(raw_data)
self.current = tag
elif self.is_current_end(tag):
self.last_position = tag.end
yield BlockTag(
block_type_name=self.current.block_type_name,
block_name=self.current.block_name,
contents=self.data[self.current.end : tag.start],
full_block=self.data[self.current.start : tag.end],
)
self.current = None
if self.current:
dbt.exceptions.raise_compiler_error(
(
"Reached EOF without finding a close block for "
"{0.block_type_name} (from {0.end})"
).format(self.current)
)
if collect_raw_data:
raw_data = self.data[self.last_position :]
if raw_data:
yield BlockData(raw_data)
|
https://github.com/fishtown-analytics/dbt/issues/2066
|
Running with dbt=0.15.2-a1
Encountered an error:
Traceback (most recent call last):
File "/Users/drew/fishtown/dbt/core/dbt/main.py", line 80, in main
results, succeeded = handle_and_check(args)
File "/Users/drew/fishtown/dbt/core/dbt/main.py", line 158, in handle_and_check
task, res = run_from_args(parsed)
File "/Users/drew/fishtown/dbt/core/dbt/main.py", line 210, in run_from_args
results = task.run()
File "/Users/drew/fishtown/dbt/core/dbt/task/runnable.py", line 300, in run
self._runtime_initialize()
File "/Users/drew/fishtown/dbt/core/dbt/task/runnable.py", line 81, in _runtime_initialize
super()._runtime_initialize()
File "/Users/drew/fishtown/dbt/core/dbt/task/runnable.py", line 54, in _runtime_initialize
self.load_manifest()
File "/Users/drew/fishtown/dbt/core/dbt/task/runnable.py", line 46, in load_manifest
self.manifest = get_full_manifest(self.config)
File "/Users/drew/fishtown/dbt/core/dbt/perf_utils.py", line 23, in get_full_manifest
return load_manifest(config, internal, set_header)
File "/Users/drew/fishtown/dbt/core/dbt/parser/manifest.py", line 441, in load_manifest
return ManifestLoader.load_all(config, internal_manifest, macro_hook)
File "/Users/drew/fishtown/dbt/core/dbt/parser/manifest.py", line 329, in load_all
loader.load(internal_manifest=internal_manifest)
File "/Users/drew/fishtown/dbt/core/dbt/parser/manifest.py", line 200, in load
self.parse_project(project, macro_manifest, old_results)
File "/Users/drew/fishtown/dbt/core/dbt/parser/manifest.py", line 174, in parse_project
self.parse_with_cache(path, parser, old_results)
File "/Users/drew/fishtown/dbt/core/dbt/parser/manifest.py", line 133, in parse_with_cache
parser.parse_file(block)
File "/Users/drew/fishtown/dbt/core/dbt/parser/docs.py", line 90, in parse_file
for block in searcher:
File "/Users/drew/fishtown/dbt/core/dbt/parser/search.py", line 125, in __iter__
for block in self.extract_blocks(entry):
File "/Users/drew/fishtown/dbt/core/dbt/parser/search.py", line 110, in extract_blocks
collect_raw_data=False
File "/Users/drew/fishtown/dbt/core/dbt/clients/jinja.py", line 399, in extract_toplevel_blocks
collect_raw_data=collect_raw_data
File "/Users/drew/fishtown/dbt/core/dbt/clients/_jinja_blocks.py", line 371, in lex_for_blocks
collect_raw_data=collect_raw_data))
File "/Users/drew/fishtown/dbt/core/dbt/clients/_jinja_blocks.py", line 362, in find_blocks
).format(self.current))
File "/Users/drew/fishtown/dbt/core/dbt/exceptions.py", line 357, in raise_compiler_error
raise CompilationException(msg, node)
dbt.exceptions.CompilationException: <exception str() failed>
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/drew/fishtown/dbt/env/bin/dbt", line 7, in <module>
exec(compile(f.read(), __file__, 'exec'))
File "/Users/drew/fishtown/dbt/core/scripts/dbt", line 7, in <module>
dbt.main.main(sys.argv[1:])
File "/Users/drew/fishtown/dbt/core/dbt/main.py", line 96, in main
logger.warning(str(e))
File "/Users/drew/fishtown/dbt/core/dbt/exceptions.py", line 99, in __str__
node_string = " in {}".format(self.node_to_string(self.node))
File "/Users/drew/fishtown/dbt/core/dbt/exceptions.py", line 66, in node_to_string
node.resource_type,
AttributeError: 'NotImplementedType' object has no attribute 'resource_type'
|
AttributeError
|
def node_to_string(self, node):
if node is None:
return "<Unknown>"
if not hasattr(node, "name"):
# we probably failed to parse a block, so we can't know the name
return "{} ({})".format(node.resource_type, node.original_file_path)
if hasattr(node, "contents"):
# handle FileBlocks. They aren't really nodes but we want to render
# out the path we know at least. This indicates an error during
# block parsing.
return "{}".format(node.path.original_file_path)
return "{} {} ({})".format(node.resource_type, node.name, node.original_file_path)
|
def node_to_string(self, node):
if node is None:
return "<Unknown>"
if not hasattr(node, "name"):
# we probably failed to parse a block, so we can't know the name
return "{} ({})".format(node.resource_type, node.original_file_path)
return "{} {} ({})".format(node.resource_type, node.name, node.original_file_path)
|
https://github.com/fishtown-analytics/dbt/issues/2066
|
Running with dbt=0.15.2-a1
Encountered an error:
Traceback (most recent call last):
File "/Users/drew/fishtown/dbt/core/dbt/main.py", line 80, in main
results, succeeded = handle_and_check(args)
File "/Users/drew/fishtown/dbt/core/dbt/main.py", line 158, in handle_and_check
task, res = run_from_args(parsed)
File "/Users/drew/fishtown/dbt/core/dbt/main.py", line 210, in run_from_args
results = task.run()
File "/Users/drew/fishtown/dbt/core/dbt/task/runnable.py", line 300, in run
self._runtime_initialize()
File "/Users/drew/fishtown/dbt/core/dbt/task/runnable.py", line 81, in _runtime_initialize
super()._runtime_initialize()
File "/Users/drew/fishtown/dbt/core/dbt/task/runnable.py", line 54, in _runtime_initialize
self.load_manifest()
File "/Users/drew/fishtown/dbt/core/dbt/task/runnable.py", line 46, in load_manifest
self.manifest = get_full_manifest(self.config)
File "/Users/drew/fishtown/dbt/core/dbt/perf_utils.py", line 23, in get_full_manifest
return load_manifest(config, internal, set_header)
File "/Users/drew/fishtown/dbt/core/dbt/parser/manifest.py", line 441, in load_manifest
return ManifestLoader.load_all(config, internal_manifest, macro_hook)
File "/Users/drew/fishtown/dbt/core/dbt/parser/manifest.py", line 329, in load_all
loader.load(internal_manifest=internal_manifest)
File "/Users/drew/fishtown/dbt/core/dbt/parser/manifest.py", line 200, in load
self.parse_project(project, macro_manifest, old_results)
File "/Users/drew/fishtown/dbt/core/dbt/parser/manifest.py", line 174, in parse_project
self.parse_with_cache(path, parser, old_results)
File "/Users/drew/fishtown/dbt/core/dbt/parser/manifest.py", line 133, in parse_with_cache
parser.parse_file(block)
File "/Users/drew/fishtown/dbt/core/dbt/parser/docs.py", line 90, in parse_file
for block in searcher:
File "/Users/drew/fishtown/dbt/core/dbt/parser/search.py", line 125, in __iter__
for block in self.extract_blocks(entry):
File "/Users/drew/fishtown/dbt/core/dbt/parser/search.py", line 110, in extract_blocks
collect_raw_data=False
File "/Users/drew/fishtown/dbt/core/dbt/clients/jinja.py", line 399, in extract_toplevel_blocks
collect_raw_data=collect_raw_data
File "/Users/drew/fishtown/dbt/core/dbt/clients/_jinja_blocks.py", line 371, in lex_for_blocks
collect_raw_data=collect_raw_data))
File "/Users/drew/fishtown/dbt/core/dbt/clients/_jinja_blocks.py", line 362, in find_blocks
).format(self.current))
File "/Users/drew/fishtown/dbt/core/dbt/exceptions.py", line 357, in raise_compiler_error
raise CompilationException(msg, node)
dbt.exceptions.CompilationException: <exception str() failed>
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/drew/fishtown/dbt/env/bin/dbt", line 7, in <module>
exec(compile(f.read(), __file__, 'exec'))
File "/Users/drew/fishtown/dbt/core/scripts/dbt", line 7, in <module>
dbt.main.main(sys.argv[1:])
File "/Users/drew/fishtown/dbt/core/dbt/main.py", line 96, in main
logger.warning(str(e))
File "/Users/drew/fishtown/dbt/core/dbt/exceptions.py", line 99, in __str__
node_string = " in {}".format(self.node_to_string(self.node))
File "/Users/drew/fishtown/dbt/core/dbt/exceptions.py", line 66, in node_to_string
node.resource_type,
AttributeError: 'NotImplementedType' object has no attribute 'resource_type'
|
AttributeError
|
def extract_blocks(self, source_file: FileBlock) -> Iterable[BlockTag]:
try:
blocks = extract_toplevel_blocks(
source_file.contents,
allowed_blocks=self.allowed_blocks,
collect_raw_data=False,
)
# this makes mypy happy, and this is an invariant we really need
for block in blocks:
assert isinstance(block, BlockTag)
yield block
except CompilationException as exc:
if exc.node is None:
exc.node = source_file
raise
|
def extract_blocks(self, source_file: FileBlock) -> Iterable[BlockTag]:
try:
blocks = extract_toplevel_blocks(
source_file.contents,
allowed_blocks=self.allowed_blocks,
collect_raw_data=False,
)
# this makes mypy happy, and this is an invariant we really need
for block in blocks:
assert isinstance(block, BlockTag)
yield block
except CompilationException as exc:
if exc.node is None:
# TODO(jeb): attach info about resource type/file path here
exc.node = NotImplemented
raise
|
https://github.com/fishtown-analytics/dbt/issues/2066
|
Running with dbt=0.15.2-a1
Encountered an error:
Traceback (most recent call last):
File "/Users/drew/fishtown/dbt/core/dbt/main.py", line 80, in main
results, succeeded = handle_and_check(args)
File "/Users/drew/fishtown/dbt/core/dbt/main.py", line 158, in handle_and_check
task, res = run_from_args(parsed)
File "/Users/drew/fishtown/dbt/core/dbt/main.py", line 210, in run_from_args
results = task.run()
File "/Users/drew/fishtown/dbt/core/dbt/task/runnable.py", line 300, in run
self._runtime_initialize()
File "/Users/drew/fishtown/dbt/core/dbt/task/runnable.py", line 81, in _runtime_initialize
super()._runtime_initialize()
File "/Users/drew/fishtown/dbt/core/dbt/task/runnable.py", line 54, in _runtime_initialize
self.load_manifest()
File "/Users/drew/fishtown/dbt/core/dbt/task/runnable.py", line 46, in load_manifest
self.manifest = get_full_manifest(self.config)
File "/Users/drew/fishtown/dbt/core/dbt/perf_utils.py", line 23, in get_full_manifest
return load_manifest(config, internal, set_header)
File "/Users/drew/fishtown/dbt/core/dbt/parser/manifest.py", line 441, in load_manifest
return ManifestLoader.load_all(config, internal_manifest, macro_hook)
File "/Users/drew/fishtown/dbt/core/dbt/parser/manifest.py", line 329, in load_all
loader.load(internal_manifest=internal_manifest)
File "/Users/drew/fishtown/dbt/core/dbt/parser/manifest.py", line 200, in load
self.parse_project(project, macro_manifest, old_results)
File "/Users/drew/fishtown/dbt/core/dbt/parser/manifest.py", line 174, in parse_project
self.parse_with_cache(path, parser, old_results)
File "/Users/drew/fishtown/dbt/core/dbt/parser/manifest.py", line 133, in parse_with_cache
parser.parse_file(block)
File "/Users/drew/fishtown/dbt/core/dbt/parser/docs.py", line 90, in parse_file
for block in searcher:
File "/Users/drew/fishtown/dbt/core/dbt/parser/search.py", line 125, in __iter__
for block in self.extract_blocks(entry):
File "/Users/drew/fishtown/dbt/core/dbt/parser/search.py", line 110, in extract_blocks
collect_raw_data=False
File "/Users/drew/fishtown/dbt/core/dbt/clients/jinja.py", line 399, in extract_toplevel_blocks
collect_raw_data=collect_raw_data
File "/Users/drew/fishtown/dbt/core/dbt/clients/_jinja_blocks.py", line 371, in lex_for_blocks
collect_raw_data=collect_raw_data))
File "/Users/drew/fishtown/dbt/core/dbt/clients/_jinja_blocks.py", line 362, in find_blocks
).format(self.current))
File "/Users/drew/fishtown/dbt/core/dbt/exceptions.py", line 357, in raise_compiler_error
raise CompilationException(msg, node)
dbt.exceptions.CompilationException: <exception str() failed>
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/drew/fishtown/dbt/env/bin/dbt", line 7, in <module>
exec(compile(f.read(), __file__, 'exec'))
File "/Users/drew/fishtown/dbt/core/scripts/dbt", line 7, in <module>
dbt.main.main(sys.argv[1:])
File "/Users/drew/fishtown/dbt/core/dbt/main.py", line 96, in main
logger.warning(str(e))
File "/Users/drew/fishtown/dbt/core/dbt/exceptions.py", line 99, in __str__
node_string = " in {}".format(self.node_to_string(self.node))
File "/Users/drew/fishtown/dbt/core/dbt/exceptions.py", line 66, in node_to_string
node.resource_type,
AttributeError: 'NotImplementedType' object has no attribute 'resource_type'
|
AttributeError
|
def __init__(
self,
log_dir: Optional[str] = None,
level=logbook.DEBUG,
filter=None,
bubble=True,
max_size=10 * 1024 * 1024, # 10 mb
backup_count=5,
) -> None:
self.disabled = False
self._msg_buffer: Optional[List[logbook.LogRecord]] = []
# if we get 1k messages without a logfile being set, something is wrong
self._bufmax = 1000
self._log_path = None
# we need the base handler class' __init__ to run so handling works
logbook.Handler.__init__(self, level, filter, bubble)
if log_dir is not None:
self.set_path(log_dir)
self._text_format_string = None
self._max_size = max_size
self._backup_count = backup_count
|
def __init__(
self,
log_dir: Optional[str] = None,
level=logbook.DEBUG,
filter=None,
bubble=True,
) -> None:
self.disabled = False
self._msg_buffer: Optional[List[logbook.LogRecord]] = []
# if we get 1k messages without a logfile being set, something is wrong
self._bufmax = 1000
self._log_path = None
# we need the base handler class' __init__ to run so handling works
logbook.Handler.__init__(self, level, filter, bubble)
if log_dir is not None:
self.set_path(log_dir)
self._text_format_string = None
|
https://github.com/fishtown-analytics/dbt/issues/1865
|
2019-10-25 03:10:31,150080 (MainThread): Running with dbt=0.15.0-b2
2019-10-25 03:10:31,478080 (MainThread): running dbt with arguments Namespace(cls=<class 'dbt.task.test.TestTask'>, data=False, debug=True, exclude=None, log_cache_events=False, log_format='default', models=['dim_agency_assert_agency_id'], partial_parse=None, profile=None, profiles_dir='C:\\Users\\dbt_fan\\.dbt', project_dir=None, record_timing_info=None, rpc_method='test', schema=False, single_threaded=False, strict=False, target=None, test_new_parser=False, threads=None, use_cache=True, vars='{}', version_check=True, warn_error=False, which='test', write_json=True)
2019-10-25 03:10:31,480087 (MainThread): Encountered an error:
2019-10-25 03:10:31,480087 (MainThread): [WinError 183] Cannot create a file when that file already exists: 'logs\\dbt.log' -> 'C:\\source\\my_dbt\\logs\\dbt-2019-10-24.log'
2019-10-25 03:10:31,482083 (MainThread): Traceback (most recent call last):
File "c:\users\dbt_fan\appdata\local\programs\python\python37\lib\site-packages\dbt\main.py", line 80, in main
results, succeeded = handle_and_check(args)
File "c:\users\dbt_fan\appdata\local\programs\python\python37\lib\site-packages\dbt\main.py", line 158, in handle_and_check
task, res = run_from_args(parsed)
File "c:\users\dbt_fan\appdata\local\programs\python\python37\lib\site-packages\dbt\main.py", line 204, in run_from_args
log_manager.set_path(log_path)
File "c:\users\dbt_fan\appdata\local\programs\python\python37\lib\site-packages\dbt\logger.py", line 483, in set_path
self._file_handler.set_path(path)
File "c:\users\dbt_fan\appdata\local\programs\python\python37\lib\site-packages\dbt\logger.py", line 383, in set_path
self._replay_buffered()
File "c:\users\dbt_fan\appdata\local\programs\python\python37\lib\site-packages\dbt\logger.py", line 402, in _replay_buffered
super().emit(record)
File "c:\users\dbt_fan\appdata\local\programs\python\python37\lib\site-packages\logbook\handlers.py", line 980, in emit
self.perform_rollover(new_timestamp)
File "c:\users\dbt_fan\appdata\local\programs\python\python37\lib\site-packages\logbook\handlers.py", line 962, in perform_rollover
os.rename(self._filename, filename)
FileExistsError: [WinError 183] Cannot create a file when that file already exists: 'logs\\dbt.log' -> 'C:\\source\\my_dbt\\logs\\dbt-2019-10-24.log'
|
FileExistsError
|
def _super_init(self, log_path):
logbook.RotatingFileHandler.__init__(
self,
filename=log_path,
level=self.level,
filter=self.filter,
delay=True,
max_size=self._max_size,
backup_count=self._backup_count,
bubble=self.bubble,
format_string=DEBUG_LOG_FORMAT,
)
FormatterMixin.__init__(self, DEBUG_LOG_FORMAT)
|
def _super_init(self, log_path):
logbook.TimedRotatingFileHandler.__init__(
self,
filename=log_path,
level=self.level,
filter=self.filter,
bubble=self.bubble,
format_string=DEBUG_LOG_FORMAT,
date_format="%Y-%m-%d",
backup_count=7,
timed_filename_for_current=False,
)
FormatterMixin.__init__(self, DEBUG_LOG_FORMAT)
|
https://github.com/fishtown-analytics/dbt/issues/1865
|
2019-10-25 03:10:31,150080 (MainThread): Running with dbt=0.15.0-b2
2019-10-25 03:10:31,478080 (MainThread): running dbt with arguments Namespace(cls=<class 'dbt.task.test.TestTask'>, data=False, debug=True, exclude=None, log_cache_events=False, log_format='default', models=['dim_agency_assert_agency_id'], partial_parse=None, profile=None, profiles_dir='C:\\Users\\dbt_fan\\.dbt', project_dir=None, record_timing_info=None, rpc_method='test', schema=False, single_threaded=False, strict=False, target=None, test_new_parser=False, threads=None, use_cache=True, vars='{}', version_check=True, warn_error=False, which='test', write_json=True)
2019-10-25 03:10:31,480087 (MainThread): Encountered an error:
2019-10-25 03:10:31,480087 (MainThread): [WinError 183] Cannot create a file when that file already exists: 'logs\\dbt.log' -> 'C:\\source\\my_dbt\\logs\\dbt-2019-10-24.log'
2019-10-25 03:10:31,482083 (MainThread): Traceback (most recent call last):
File "c:\users\dbt_fan\appdata\local\programs\python\python37\lib\site-packages\dbt\main.py", line 80, in main
results, succeeded = handle_and_check(args)
File "c:\users\dbt_fan\appdata\local\programs\python\python37\lib\site-packages\dbt\main.py", line 158, in handle_and_check
task, res = run_from_args(parsed)
File "c:\users\dbt_fan\appdata\local\programs\python\python37\lib\site-packages\dbt\main.py", line 204, in run_from_args
log_manager.set_path(log_path)
File "c:\users\dbt_fan\appdata\local\programs\python\python37\lib\site-packages\dbt\logger.py", line 483, in set_path
self._file_handler.set_path(path)
File "c:\users\dbt_fan\appdata\local\programs\python\python37\lib\site-packages\dbt\logger.py", line 383, in set_path
self._replay_buffered()
File "c:\users\dbt_fan\appdata\local\programs\python\python37\lib\site-packages\dbt\logger.py", line 402, in _replay_buffered
super().emit(record)
File "c:\users\dbt_fan\appdata\local\programs\python\python37\lib\site-packages\logbook\handlers.py", line 980, in emit
self.perform_rollover(new_timestamp)
File "c:\users\dbt_fan\appdata\local\programs\python\python37\lib\site-packages\logbook\handlers.py", line 962, in perform_rollover
os.rename(self._filename, filename)
FileExistsError: [WinError 183] Cannot create a file when that file already exists: 'logs\\dbt.log' -> 'C:\\source\\my_dbt\\logs\\dbt-2019-10-24.log'
|
FileExistsError
|
def Table(cls) -> str:
return str(RelationType.Table)
|
def Table(self) -> str:
return str(RelationType.Table)
|
https://github.com/fishtown-analytics/dbt/issues/1698
|
dbt --debug run
Running with dbt=0.14.0
2019-08-26 10:41:40,953 (MainThread): Tracking: tracking
2019-08-26 10:41:40,959 (MainThread): Sending event: {'category': 'dbt', 'action': 'invocation', 'label': 'start', 'context': [<snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10eda3bd0>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10edae510>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10eda3b90>]}
2019-08-26 10:41:41,213 (MainThread): Parsing macros/core.sql
2019-08-26 10:41:41,219 (MainThread): Parsing macros/materializations/helpers.sql
2019-08-26 10:41:41,227 (MainThread): Parsing macros/materializations/snapshot/snapshot_merge.sql
2019-08-26 10:41:41,229 (MainThread): Parsing macros/materializations/snapshot/strategies.sql
2019-08-26 10:41:41,242 (MainThread): Parsing macros/materializations/snapshot/snapshot.sql
2019-08-26 10:41:41,261 (MainThread): Parsing macros/materializations/seed/seed.sql
2019-08-26 10:41:41,275 (MainThread): Parsing macros/materializations/incremental/incremental.sql
2019-08-26 10:41:41,282 (MainThread): Parsing macros/materializations/common/merge.sql
2019-08-26 10:41:41,288 (MainThread): Parsing macros/materializations/table/table.sql
2019-08-26 10:41:41,293 (MainThread): Parsing macros/materializations/view/view.sql
2019-08-26 10:41:41,298 (MainThread): Parsing macros/materializations/view/create_or_replace_view.sql
2019-08-26 10:41:41,303 (MainThread): Parsing macros/etc/get_custom_alias.sql
2019-08-26 10:41:41,304 (MainThread): Parsing macros/etc/query.sql
2019-08-26 10:41:41,305 (MainThread): Parsing macros/etc/is_incremental.sql
2019-08-26 10:41:41,307 (MainThread): Parsing macros/etc/get_relation_comment.sql
2019-08-26 10:41:41,309 (MainThread): Parsing macros/etc/datetime.sql
2019-08-26 10:41:41,317 (MainThread): Parsing macros/etc/get_custom_schema.sql
2019-08-26 10:41:41,319 (MainThread): Parsing macros/adapters/common.sql
2019-08-26 10:41:41,348 (MainThread): Parsing macros/schema_tests/relationships.sql
2019-08-26 10:41:41,350 (MainThread): Parsing macros/schema_tests/not_null.sql
2019-08-26 10:41:41,351 (MainThread): Parsing macros/schema_tests/unique.sql
2019-08-26 10:41:41,352 (MainThread): Parsing macros/schema_tests/accepted_values.sql
2019-08-26 10:41:41,354 (MainThread): Parsing macros/catalog.sql
2019-08-26 10:41:41,356 (MainThread): Parsing macros/relations.sql
2019-08-26 10:41:41,357 (MainThread): Parsing macros/adapters.sql
2019-08-26 10:41:41,367 (MainThread): Parsing macros/materializations/snapshot_merge.sql
2019-08-26 10:41:41,380 (MainThread): Parsing model.towers.distance_sita2019_spgmi2018
2019-08-26 10:41:41,381 (MainThread): Acquiring new postgres connection "distance_sita2019_spgmi2018".
2019-08-26 10:41:41,381 (MainThread): Opening a new connection, currently in state init
2019-08-26 10:41:41,698 (MainThread): Parsing model.towers.distance_spgmi2018_sita2019
2019-08-26 10:41:41,699 (MainThread): Acquiring new postgres connection "distance_spgmi2018_sita2019".
2019-08-26 10:41:41,699 (MainThread): Re-using an available connection from the pool (formerly distance_sita2019_spgmi2018).
2019-08-26 10:41:41,705 (MainThread): Parsing model.towers.towers_spgmisita100m_20180701
2019-08-26 10:41:41,706 (MainThread): Acquiring new postgres connection "towers_spgmisita100m_20180701".
2019-08-26 10:41:41,706 (MainThread): Re-using an available connection from the pool (formerly distance_spgmi2018_sita2019).
2019-08-26 10:41:41,710 (MainThread): Parsing model.towers.towers_transform_sita2009_nearspgmi
2019-08-26 10:41:41,711 (MainThread): Acquiring new postgres connection "towers_transform_sita2009_nearspgmi".
2019-08-26 10:41:41,711 (MainThread): Re-using an available connection from the pool (formerly towers_spgmisita100m_20180701).
2019-08-26 10:41:41,716 (MainThread): Parsing model.towers.towers_transform_spgmi2018_buffers
2019-08-26 10:41:41,717 (MainThread): Acquiring new postgres connection "towers_transform_spgmi2018_buffers".
2019-08-26 10:41:41,717 (MainThread): Re-using an available connection from the pool (formerly towers_transform_sita2009_nearspgmi).
2019-08-26 10:41:41,723 (MainThread): Parsing model.towers.towers_transform_sita2019_buffers
2019-08-26 10:41:41,723 (MainThread): Acquiring new postgres connection "towers_transform_sita2019_buffers".
2019-08-26 10:41:41,724 (MainThread): Re-using an available connection from the pool (formerly towers_transform_spgmi2018_buffers).
2019-08-26 10:41:41,729 (MainThread): Parsing model.towers.towers_transform_sita2009_buffers
2019-08-26 10:41:41,729 (MainThread): Acquiring new postgres connection "towers_transform_sita2009_buffers".
2019-08-26 10:41:41,729 (MainThread): Re-using an available connection from the pool (formerly towers_transform_sita2019_buffers).
2019-08-26 10:41:41,734 (MainThread): Parsing model.towers.base_towers_sita_2019
2019-08-26 10:41:41,735 (MainThread): Acquiring new postgres connection "base_towers_sita_2019".
2019-08-26 10:41:41,735 (MainThread): Re-using an available connection from the pool (formerly towers_transform_sita2009_buffers).
2019-08-26 10:41:41,738 (MainThread): Parsing model.towers.base_towers_spgmi_2018
2019-08-26 10:41:41,739 (MainThread): Acquiring new postgres connection "base_towers_spgmi_2018".
2019-08-26 10:41:41,739 (MainThread): Re-using an available connection from the pool (formerly base_towers_sita_2019).
2019-08-26 10:41:41,742 (MainThread): Parsing model.towers.base_towers_sita_2009
2019-08-26 10:41:41,743 (MainThread): Acquiring new postgres connection "base_towers_sita_2009".
2019-08-26 10:41:41,743 (MainThread): Re-using an available connection from the pool (formerly base_towers_spgmi_2018).
2019-08-26 10:41:41,747 (MainThread): Parsing analysis.towers.morphology_spgmi_2018
2019-08-26 10:41:41,748 (MainThread): Acquiring new postgres connection "morphology_spgmi_2018".
2019-08-26 10:41:41,748 (MainThread): Re-using an available connection from the pool (formerly base_towers_sita_2009).
2019-08-26 10:41:41,780 (MainThread): Found 10 models, 0 tests, 0 snapshots, 1 analyses, 116 macros, 0 operations, 0 seed files, 0 sources
2019-08-26 10:41:41,781 (MainThread):
2019-08-26 10:41:41,781 (MainThread): Acquiring new postgres connection "master".
2019-08-26 10:41:41,781 (MainThread): Re-using an available connection from the pool (formerly morphology_spgmi_2018).
2019-08-26 10:41:41,792 (MainThread): Parsing macros/core.sql
2019-08-26 10:41:41,796 (MainThread): Parsing macros/materializations/helpers.sql
2019-08-26 10:41:41,802 (MainThread): Parsing macros/materializations/snapshot/snapshot_merge.sql
2019-08-26 10:41:41,804 (MainThread): Parsing macros/materializations/snapshot/strategies.sql
2019-08-26 10:41:41,815 (MainThread): Parsing macros/materializations/snapshot/snapshot.sql
2019-08-26 10:41:41,833 (MainThread): Parsing macros/materializations/seed/seed.sql
2019-08-26 10:41:41,846 (MainThread): Parsing macros/materializations/incremental/incremental.sql
2019-08-26 10:41:41,853 (MainThread): Parsing macros/materializations/common/merge.sql
2019-08-26 10:41:41,860 (MainThread): Parsing macros/materializations/table/table.sql
2019-08-26 10:41:41,865 (MainThread): Parsing macros/materializations/view/view.sql
2019-08-26 10:41:41,869 (MainThread): Parsing macros/materializations/view/create_or_replace_view.sql
2019-08-26 10:41:41,874 (MainThread): Parsing macros/etc/get_custom_alias.sql
2019-08-26 10:41:41,875 (MainThread): Parsing macros/etc/query.sql
2019-08-26 10:41:41,876 (MainThread): Parsing macros/etc/is_incremental.sql
2019-08-26 10:41:41,877 (MainThread): Parsing macros/etc/get_relation_comment.sql
2019-08-26 10:41:41,880 (MainThread): Parsing macros/etc/datetime.sql
2019-08-26 10:41:41,887 (MainThread): Parsing macros/etc/get_custom_schema.sql
2019-08-26 10:41:41,889 (MainThread): Parsing macros/adapters/common.sql
2019-08-26 10:41:41,918 (MainThread): Parsing macros/schema_tests/relationships.sql
2019-08-26 10:41:41,919 (MainThread): Parsing macros/schema_tests/not_null.sql
2019-08-26 10:41:41,920 (MainThread): Parsing macros/schema_tests/unique.sql
2019-08-26 10:41:41,921 (MainThread): Parsing macros/schema_tests/accepted_values.sql
2019-08-26 10:41:41,923 (MainThread): Parsing macros/catalog.sql
2019-08-26 10:41:41,926 (MainThread): Parsing macros/relations.sql
2019-08-26 10:41:41,927 (MainThread): Parsing macros/adapters.sql
2019-08-26 10:41:41,936 (MainThread): Parsing macros/materializations/snapshot_merge.sql
2019-08-26 10:41:42,036 (MainThread): Using postgres connection "master".
2019-08-26 10:41:42,036 (MainThread): On master:
select distinct nspname from pg_namespace
2019-08-26 10:41:42,119 (MainThread): SQL status: SELECT 370 in 0.08 seconds
2019-08-26 10:41:42,148 (MainThread): Using postgres connection "master".
2019-08-26 10:41:42,148 (MainThread): On master: BEGIN
2019-08-26 10:41:42,194 (MainThread): SQL status: BEGIN in 0.05 seconds
2019-08-26 10:41:42,195 (MainThread): Using postgres connection "master".
2019-08-26 10:41:42,195 (MainThread): On master: select
'reference' as database,
tablename as name,
schemaname as schema,
'table' as type
from pg_tables
where schemaname ilike 'tlee'
union all
select
'reference' as database,
viewname as name,
schemaname as schema,
'view' as type
from pg_views
where schemaname ilike 'tlee'
2019-08-26 10:41:42,314 (MainThread): SQL status: SELECT 261 in 0.12 seconds
2019-08-26 10:41:43,070 (MainThread): Using postgres connection "master".
2019-08-26 10:41:43,070 (MainThread): On master: --
--
with relation as (
select
pg_rewrite.ev_class as class,
pg_rewrite.oid as id
from pg_rewrite
),
class as (
select
oid as id,
relname as name,
relnamespace as schema,
relkind as kind
from pg_class
),
dependency as (
select
pg_depend.objid as id,
pg_depend.refobjid as ref
from pg_depend
),
schema as (
select
pg_namespace.oid as id,
pg_namespace.nspname as name
from pg_namespace
where nspname != 'information_schema' and nspname not like 'pg_%'
),
referenced as (
select
relation.id AS id,
referenced_class.name ,
referenced_class.schema ,
referenced_class.kind
from relation
join class as referenced_class on relation.class=referenced_class.id
where referenced_class.kind in ('r', 'v')
),
relationships as (
select
referenced.name as referenced_name,
referenced.schema as referenced_schema_id,
dependent_class.name as dependent_name,
dependent_class.schema as dependent_schema_id,
referenced.kind as kind
from referenced
join dependency on referenced.id=dependency.id
join class as dependent_class on dependency.ref=dependent_class.id
where
(referenced.name != dependent_class.name or
referenced.schema != dependent_class.schema)
)
select
referenced_schema.name as referenced_schema,
relationships.referenced_name as referenced_name,
dependent_schema.name as dependent_schema,
relationships.dependent_name as dependent_name
from relationships
join schema as dependent_schema on relationships.dependent_schema_id=dependent_schema.id
join schema as referenced_schema on relationships.referenced_schema_id=referenced_schema.id
group by referenced_schema, referenced_name, dependent_schema, dependent_name
order by referenced_schema, referenced_name, dependent_schema, dependent_name;
2019-08-26 10:41:46,706 (MainThread): SQL status: SELECT 49106 in 3.64 seconds
2019-08-26 10:42:04,957 (MainThread): On master: ROLLBACK
2019-08-26 10:42:04,995 (MainThread): Connection 'master' was left open.
2019-08-26 10:42:04,996 (MainThread): On master: Close
2019-08-26 10:42:04,997 (MainThread): Sending event: {'category': 'dbt', 'action': 'invocation', 'label': 'end', 'context': [<snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10edab110>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x1109d8910>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x1109d8cd0>]}
2019-08-26 10:42:05,196 (MainThread): Flushing usage events
2019-08-26 10:42:05,196 (MainThread): Encountered an error:
2019-08-26 10:42:05,196 (MainThread): Cache inconsistency detected: in add_link, referenced link key _ReferenceKey(database='reference', schema='tlee', identifier='cpp009_business_spend') not in cache!
2019-08-26 10:42:05,198 (MainThread): Traceback (most recent call last):
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/main.py", line 82, in main
results, succeeded = handle_and_check(args)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/main.py", line 151, in handle_and_check
task, res = run_from_args(parsed)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/main.py", line 216, in run_from_args
results = task.run()
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/task/runnable.py", line 282, in run
result = self.execute_with_hooks(selected_uids)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/task/runnable.py", line 252, in execute_with_hooks
self.before_run(adapter, selected_uids)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/task/run.py", line 150, in before_run
self.populate_adapter_cache(adapter)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/task/run.py", line 63, in populate_adapter_cache
adapter.set_relations_cache(self.manifest)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/base/impl.py", line 342, in set_relations_cache
self._relations_cache_for_schemas(manifest)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/postgres/impl.py", line 88, in _relations_cache_for_schemas
self._link_cached_relations(manifest)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/postgres/impl.py", line 84, in _link_cached_relations
self._link_cached_database_relations(schemas)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/postgres/impl.py", line 60, in _link_cached_database_relations
self.cache.add_link(dependent, referenced)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/cache.py", line 296, in add_link
self._add_link(referenced, dependent)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/cache.py", line 254, in _add_link
.format(referenced_key)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/exceptions.py", line 473, in raise_cache_inconsistent
raise InternalException('Cache inconsistency detected: {}'.format(message))
dbt.exceptions.InternalException: Cache inconsistency detected: in add_link, referenced link key _ReferenceKey(database='reference', schema='tlee', identifier='cpp009_business_spend') not in cache!
|
dbt.exceptions.InternalException
|
def CTE(cls) -> str:
return str(RelationType.CTE)
|
def CTE(self) -> str:
return str(RelationType.CTE)
|
https://github.com/fishtown-analytics/dbt/issues/1698
|
dbt --debug run
Running with dbt=0.14.0
2019-08-26 10:41:40,953 (MainThread): Tracking: tracking
2019-08-26 10:41:40,959 (MainThread): Sending event: {'category': 'dbt', 'action': 'invocation', 'label': 'start', 'context': [<snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10eda3bd0>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10edae510>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10eda3b90>]}
2019-08-26 10:41:41,213 (MainThread): Parsing macros/core.sql
2019-08-26 10:41:41,219 (MainThread): Parsing macros/materializations/helpers.sql
2019-08-26 10:41:41,227 (MainThread): Parsing macros/materializations/snapshot/snapshot_merge.sql
2019-08-26 10:41:41,229 (MainThread): Parsing macros/materializations/snapshot/strategies.sql
2019-08-26 10:41:41,242 (MainThread): Parsing macros/materializations/snapshot/snapshot.sql
2019-08-26 10:41:41,261 (MainThread): Parsing macros/materializations/seed/seed.sql
2019-08-26 10:41:41,275 (MainThread): Parsing macros/materializations/incremental/incremental.sql
2019-08-26 10:41:41,282 (MainThread): Parsing macros/materializations/common/merge.sql
2019-08-26 10:41:41,288 (MainThread): Parsing macros/materializations/table/table.sql
2019-08-26 10:41:41,293 (MainThread): Parsing macros/materializations/view/view.sql
2019-08-26 10:41:41,298 (MainThread): Parsing macros/materializations/view/create_or_replace_view.sql
2019-08-26 10:41:41,303 (MainThread): Parsing macros/etc/get_custom_alias.sql
2019-08-26 10:41:41,304 (MainThread): Parsing macros/etc/query.sql
2019-08-26 10:41:41,305 (MainThread): Parsing macros/etc/is_incremental.sql
2019-08-26 10:41:41,307 (MainThread): Parsing macros/etc/get_relation_comment.sql
2019-08-26 10:41:41,309 (MainThread): Parsing macros/etc/datetime.sql
2019-08-26 10:41:41,317 (MainThread): Parsing macros/etc/get_custom_schema.sql
2019-08-26 10:41:41,319 (MainThread): Parsing macros/adapters/common.sql
2019-08-26 10:41:41,348 (MainThread): Parsing macros/schema_tests/relationships.sql
2019-08-26 10:41:41,350 (MainThread): Parsing macros/schema_tests/not_null.sql
2019-08-26 10:41:41,351 (MainThread): Parsing macros/schema_tests/unique.sql
2019-08-26 10:41:41,352 (MainThread): Parsing macros/schema_tests/accepted_values.sql
2019-08-26 10:41:41,354 (MainThread): Parsing macros/catalog.sql
2019-08-26 10:41:41,356 (MainThread): Parsing macros/relations.sql
2019-08-26 10:41:41,357 (MainThread): Parsing macros/adapters.sql
2019-08-26 10:41:41,367 (MainThread): Parsing macros/materializations/snapshot_merge.sql
2019-08-26 10:41:41,380 (MainThread): Parsing model.towers.distance_sita2019_spgmi2018
2019-08-26 10:41:41,381 (MainThread): Acquiring new postgres connection "distance_sita2019_spgmi2018".
2019-08-26 10:41:41,381 (MainThread): Opening a new connection, currently in state init
2019-08-26 10:41:41,698 (MainThread): Parsing model.towers.distance_spgmi2018_sita2019
2019-08-26 10:41:41,699 (MainThread): Acquiring new postgres connection "distance_spgmi2018_sita2019".
2019-08-26 10:41:41,699 (MainThread): Re-using an available connection from the pool (formerly distance_sita2019_spgmi2018).
2019-08-26 10:41:41,705 (MainThread): Parsing model.towers.towers_spgmisita100m_20180701
2019-08-26 10:41:41,706 (MainThread): Acquiring new postgres connection "towers_spgmisita100m_20180701".
2019-08-26 10:41:41,706 (MainThread): Re-using an available connection from the pool (formerly distance_spgmi2018_sita2019).
2019-08-26 10:41:41,710 (MainThread): Parsing model.towers.towers_transform_sita2009_nearspgmi
2019-08-26 10:41:41,711 (MainThread): Acquiring new postgres connection "towers_transform_sita2009_nearspgmi".
2019-08-26 10:41:41,711 (MainThread): Re-using an available connection from the pool (formerly towers_spgmisita100m_20180701).
2019-08-26 10:41:41,716 (MainThread): Parsing model.towers.towers_transform_spgmi2018_buffers
2019-08-26 10:41:41,717 (MainThread): Acquiring new postgres connection "towers_transform_spgmi2018_buffers".
2019-08-26 10:41:41,717 (MainThread): Re-using an available connection from the pool (formerly towers_transform_sita2009_nearspgmi).
2019-08-26 10:41:41,723 (MainThread): Parsing model.towers.towers_transform_sita2019_buffers
2019-08-26 10:41:41,723 (MainThread): Acquiring new postgres connection "towers_transform_sita2019_buffers".
2019-08-26 10:41:41,724 (MainThread): Re-using an available connection from the pool (formerly towers_transform_spgmi2018_buffers).
2019-08-26 10:41:41,729 (MainThread): Parsing model.towers.towers_transform_sita2009_buffers
2019-08-26 10:41:41,729 (MainThread): Acquiring new postgres connection "towers_transform_sita2009_buffers".
2019-08-26 10:41:41,729 (MainThread): Re-using an available connection from the pool (formerly towers_transform_sita2019_buffers).
2019-08-26 10:41:41,734 (MainThread): Parsing model.towers.base_towers_sita_2019
2019-08-26 10:41:41,735 (MainThread): Acquiring new postgres connection "base_towers_sita_2019".
2019-08-26 10:41:41,735 (MainThread): Re-using an available connection from the pool (formerly towers_transform_sita2009_buffers).
2019-08-26 10:41:41,738 (MainThread): Parsing model.towers.base_towers_spgmi_2018
2019-08-26 10:41:41,739 (MainThread): Acquiring new postgres connection "base_towers_spgmi_2018".
2019-08-26 10:41:41,739 (MainThread): Re-using an available connection from the pool (formerly base_towers_sita_2019).
2019-08-26 10:41:41,742 (MainThread): Parsing model.towers.base_towers_sita_2009
2019-08-26 10:41:41,743 (MainThread): Acquiring new postgres connection "base_towers_sita_2009".
2019-08-26 10:41:41,743 (MainThread): Re-using an available connection from the pool (formerly base_towers_spgmi_2018).
2019-08-26 10:41:41,747 (MainThread): Parsing analysis.towers.morphology_spgmi_2018
2019-08-26 10:41:41,748 (MainThread): Acquiring new postgres connection "morphology_spgmi_2018".
2019-08-26 10:41:41,748 (MainThread): Re-using an available connection from the pool (formerly base_towers_sita_2009).
2019-08-26 10:41:41,780 (MainThread): Found 10 models, 0 tests, 0 snapshots, 1 analyses, 116 macros, 0 operations, 0 seed files, 0 sources
2019-08-26 10:41:41,781 (MainThread):
2019-08-26 10:41:41,781 (MainThread): Acquiring new postgres connection "master".
2019-08-26 10:41:41,781 (MainThread): Re-using an available connection from the pool (formerly morphology_spgmi_2018).
2019-08-26 10:41:41,792 (MainThread): Parsing macros/core.sql
2019-08-26 10:41:41,796 (MainThread): Parsing macros/materializations/helpers.sql
2019-08-26 10:41:41,802 (MainThread): Parsing macros/materializations/snapshot/snapshot_merge.sql
2019-08-26 10:41:41,804 (MainThread): Parsing macros/materializations/snapshot/strategies.sql
2019-08-26 10:41:41,815 (MainThread): Parsing macros/materializations/snapshot/snapshot.sql
2019-08-26 10:41:41,833 (MainThread): Parsing macros/materializations/seed/seed.sql
2019-08-26 10:41:41,846 (MainThread): Parsing macros/materializations/incremental/incremental.sql
2019-08-26 10:41:41,853 (MainThread): Parsing macros/materializations/common/merge.sql
2019-08-26 10:41:41,860 (MainThread): Parsing macros/materializations/table/table.sql
2019-08-26 10:41:41,865 (MainThread): Parsing macros/materializations/view/view.sql
2019-08-26 10:41:41,869 (MainThread): Parsing macros/materializations/view/create_or_replace_view.sql
2019-08-26 10:41:41,874 (MainThread): Parsing macros/etc/get_custom_alias.sql
2019-08-26 10:41:41,875 (MainThread): Parsing macros/etc/query.sql
2019-08-26 10:41:41,876 (MainThread): Parsing macros/etc/is_incremental.sql
2019-08-26 10:41:41,877 (MainThread): Parsing macros/etc/get_relation_comment.sql
2019-08-26 10:41:41,880 (MainThread): Parsing macros/etc/datetime.sql
2019-08-26 10:41:41,887 (MainThread): Parsing macros/etc/get_custom_schema.sql
2019-08-26 10:41:41,889 (MainThread): Parsing macros/adapters/common.sql
2019-08-26 10:41:41,918 (MainThread): Parsing macros/schema_tests/relationships.sql
2019-08-26 10:41:41,919 (MainThread): Parsing macros/schema_tests/not_null.sql
2019-08-26 10:41:41,920 (MainThread): Parsing macros/schema_tests/unique.sql
2019-08-26 10:41:41,921 (MainThread): Parsing macros/schema_tests/accepted_values.sql
2019-08-26 10:41:41,923 (MainThread): Parsing macros/catalog.sql
2019-08-26 10:41:41,926 (MainThread): Parsing macros/relations.sql
2019-08-26 10:41:41,927 (MainThread): Parsing macros/adapters.sql
2019-08-26 10:41:41,936 (MainThread): Parsing macros/materializations/snapshot_merge.sql
2019-08-26 10:41:42,036 (MainThread): Using postgres connection "master".
2019-08-26 10:41:42,036 (MainThread): On master:
select distinct nspname from pg_namespace
2019-08-26 10:41:42,119 (MainThread): SQL status: SELECT 370 in 0.08 seconds
2019-08-26 10:41:42,148 (MainThread): Using postgres connection "master".
2019-08-26 10:41:42,148 (MainThread): On master: BEGIN
2019-08-26 10:41:42,194 (MainThread): SQL status: BEGIN in 0.05 seconds
2019-08-26 10:41:42,195 (MainThread): Using postgres connection "master".
2019-08-26 10:41:42,195 (MainThread): On master: select
'reference' as database,
tablename as name,
schemaname as schema,
'table' as type
from pg_tables
where schemaname ilike 'tlee'
union all
select
'reference' as database,
viewname as name,
schemaname as schema,
'view' as type
from pg_views
where schemaname ilike 'tlee'
2019-08-26 10:41:42,314 (MainThread): SQL status: SELECT 261 in 0.12 seconds
2019-08-26 10:41:43,070 (MainThread): Using postgres connection "master".
2019-08-26 10:41:43,070 (MainThread): On master: --
--
with relation as (
select
pg_rewrite.ev_class as class,
pg_rewrite.oid as id
from pg_rewrite
),
class as (
select
oid as id,
relname as name,
relnamespace as schema,
relkind as kind
from pg_class
),
dependency as (
select
pg_depend.objid as id,
pg_depend.refobjid as ref
from pg_depend
),
schema as (
select
pg_namespace.oid as id,
pg_namespace.nspname as name
from pg_namespace
where nspname != 'information_schema' and nspname not like 'pg_%'
),
referenced as (
select
relation.id AS id,
referenced_class.name ,
referenced_class.schema ,
referenced_class.kind
from relation
join class as referenced_class on relation.class=referenced_class.id
where referenced_class.kind in ('r', 'v')
),
relationships as (
select
referenced.name as referenced_name,
referenced.schema as referenced_schema_id,
dependent_class.name as dependent_name,
dependent_class.schema as dependent_schema_id,
referenced.kind as kind
from referenced
join dependency on referenced.id=dependency.id
join class as dependent_class on dependency.ref=dependent_class.id
where
(referenced.name != dependent_class.name or
referenced.schema != dependent_class.schema)
)
select
referenced_schema.name as referenced_schema,
relationships.referenced_name as referenced_name,
dependent_schema.name as dependent_schema,
relationships.dependent_name as dependent_name
from relationships
join schema as dependent_schema on relationships.dependent_schema_id=dependent_schema.id
join schema as referenced_schema on relationships.referenced_schema_id=referenced_schema.id
group by referenced_schema, referenced_name, dependent_schema, dependent_name
order by referenced_schema, referenced_name, dependent_schema, dependent_name;
2019-08-26 10:41:46,706 (MainThread): SQL status: SELECT 49106 in 3.64 seconds
2019-08-26 10:42:04,957 (MainThread): On master: ROLLBACK
2019-08-26 10:42:04,995 (MainThread): Connection 'master' was left open.
2019-08-26 10:42:04,996 (MainThread): On master: Close
2019-08-26 10:42:04,997 (MainThread): Sending event: {'category': 'dbt', 'action': 'invocation', 'label': 'end', 'context': [<snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10edab110>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x1109d8910>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x1109d8cd0>]}
2019-08-26 10:42:05,196 (MainThread): Flushing usage events
2019-08-26 10:42:05,196 (MainThread): Encountered an error:
2019-08-26 10:42:05,196 (MainThread): Cache inconsistency detected: in add_link, referenced link key _ReferenceKey(database='reference', schema='tlee', identifier='cpp009_business_spend') not in cache!
2019-08-26 10:42:05,198 (MainThread): Traceback (most recent call last):
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/main.py", line 82, in main
results, succeeded = handle_and_check(args)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/main.py", line 151, in handle_and_check
task, res = run_from_args(parsed)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/main.py", line 216, in run_from_args
results = task.run()
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/task/runnable.py", line 282, in run
result = self.execute_with_hooks(selected_uids)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/task/runnable.py", line 252, in execute_with_hooks
self.before_run(adapter, selected_uids)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/task/run.py", line 150, in before_run
self.populate_adapter_cache(adapter)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/task/run.py", line 63, in populate_adapter_cache
adapter.set_relations_cache(self.manifest)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/base/impl.py", line 342, in set_relations_cache
self._relations_cache_for_schemas(manifest)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/postgres/impl.py", line 88, in _relations_cache_for_schemas
self._link_cached_relations(manifest)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/postgres/impl.py", line 84, in _link_cached_relations
self._link_cached_database_relations(schemas)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/postgres/impl.py", line 60, in _link_cached_database_relations
self.cache.add_link(dependent, referenced)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/cache.py", line 296, in add_link
self._add_link(referenced, dependent)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/cache.py", line 254, in _add_link
.format(referenced_key)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/exceptions.py", line 473, in raise_cache_inconsistent
raise InternalException('Cache inconsistency detected: {}'.format(message))
dbt.exceptions.InternalException: Cache inconsistency detected: in add_link, referenced link key _ReferenceKey(database='reference', schema='tlee', identifier='cpp009_business_spend') not in cache!
|
dbt.exceptions.InternalException
|
def View(cls) -> str:
return str(RelationType.View)
|
def View(self) -> str:
return str(RelationType.View)
|
https://github.com/fishtown-analytics/dbt/issues/1698
|
dbt --debug run
Running with dbt=0.14.0
2019-08-26 10:41:40,953 (MainThread): Tracking: tracking
2019-08-26 10:41:40,959 (MainThread): Sending event: {'category': 'dbt', 'action': 'invocation', 'label': 'start', 'context': [<snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10eda3bd0>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10edae510>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10eda3b90>]}
2019-08-26 10:41:41,213 (MainThread): Parsing macros/core.sql
2019-08-26 10:41:41,219 (MainThread): Parsing macros/materializations/helpers.sql
2019-08-26 10:41:41,227 (MainThread): Parsing macros/materializations/snapshot/snapshot_merge.sql
2019-08-26 10:41:41,229 (MainThread): Parsing macros/materializations/snapshot/strategies.sql
2019-08-26 10:41:41,242 (MainThread): Parsing macros/materializations/snapshot/snapshot.sql
2019-08-26 10:41:41,261 (MainThread): Parsing macros/materializations/seed/seed.sql
2019-08-26 10:41:41,275 (MainThread): Parsing macros/materializations/incremental/incremental.sql
2019-08-26 10:41:41,282 (MainThread): Parsing macros/materializations/common/merge.sql
2019-08-26 10:41:41,288 (MainThread): Parsing macros/materializations/table/table.sql
2019-08-26 10:41:41,293 (MainThread): Parsing macros/materializations/view/view.sql
2019-08-26 10:41:41,298 (MainThread): Parsing macros/materializations/view/create_or_replace_view.sql
2019-08-26 10:41:41,303 (MainThread): Parsing macros/etc/get_custom_alias.sql
2019-08-26 10:41:41,304 (MainThread): Parsing macros/etc/query.sql
2019-08-26 10:41:41,305 (MainThread): Parsing macros/etc/is_incremental.sql
2019-08-26 10:41:41,307 (MainThread): Parsing macros/etc/get_relation_comment.sql
2019-08-26 10:41:41,309 (MainThread): Parsing macros/etc/datetime.sql
2019-08-26 10:41:41,317 (MainThread): Parsing macros/etc/get_custom_schema.sql
2019-08-26 10:41:41,319 (MainThread): Parsing macros/adapters/common.sql
2019-08-26 10:41:41,348 (MainThread): Parsing macros/schema_tests/relationships.sql
2019-08-26 10:41:41,350 (MainThread): Parsing macros/schema_tests/not_null.sql
2019-08-26 10:41:41,351 (MainThread): Parsing macros/schema_tests/unique.sql
2019-08-26 10:41:41,352 (MainThread): Parsing macros/schema_tests/accepted_values.sql
2019-08-26 10:41:41,354 (MainThread): Parsing macros/catalog.sql
2019-08-26 10:41:41,356 (MainThread): Parsing macros/relations.sql
2019-08-26 10:41:41,357 (MainThread): Parsing macros/adapters.sql
2019-08-26 10:41:41,367 (MainThread): Parsing macros/materializations/snapshot_merge.sql
2019-08-26 10:41:41,380 (MainThread): Parsing model.towers.distance_sita2019_spgmi2018
2019-08-26 10:41:41,381 (MainThread): Acquiring new postgres connection "distance_sita2019_spgmi2018".
2019-08-26 10:41:41,381 (MainThread): Opening a new connection, currently in state init
2019-08-26 10:41:41,698 (MainThread): Parsing model.towers.distance_spgmi2018_sita2019
2019-08-26 10:41:41,699 (MainThread): Acquiring new postgres connection "distance_spgmi2018_sita2019".
2019-08-26 10:41:41,699 (MainThread): Re-using an available connection from the pool (formerly distance_sita2019_spgmi2018).
2019-08-26 10:41:41,705 (MainThread): Parsing model.towers.towers_spgmisita100m_20180701
2019-08-26 10:41:41,706 (MainThread): Acquiring new postgres connection "towers_spgmisita100m_20180701".
2019-08-26 10:41:41,706 (MainThread): Re-using an available connection from the pool (formerly distance_spgmi2018_sita2019).
2019-08-26 10:41:41,710 (MainThread): Parsing model.towers.towers_transform_sita2009_nearspgmi
2019-08-26 10:41:41,711 (MainThread): Acquiring new postgres connection "towers_transform_sita2009_nearspgmi".
2019-08-26 10:41:41,711 (MainThread): Re-using an available connection from the pool (formerly towers_spgmisita100m_20180701).
2019-08-26 10:41:41,716 (MainThread): Parsing model.towers.towers_transform_spgmi2018_buffers
2019-08-26 10:41:41,717 (MainThread): Acquiring new postgres connection "towers_transform_spgmi2018_buffers".
2019-08-26 10:41:41,717 (MainThread): Re-using an available connection from the pool (formerly towers_transform_sita2009_nearspgmi).
2019-08-26 10:41:41,723 (MainThread): Parsing model.towers.towers_transform_sita2019_buffers
2019-08-26 10:41:41,723 (MainThread): Acquiring new postgres connection "towers_transform_sita2019_buffers".
2019-08-26 10:41:41,724 (MainThread): Re-using an available connection from the pool (formerly towers_transform_spgmi2018_buffers).
2019-08-26 10:41:41,729 (MainThread): Parsing model.towers.towers_transform_sita2009_buffers
2019-08-26 10:41:41,729 (MainThread): Acquiring new postgres connection "towers_transform_sita2009_buffers".
2019-08-26 10:41:41,729 (MainThread): Re-using an available connection from the pool (formerly towers_transform_sita2019_buffers).
2019-08-26 10:41:41,734 (MainThread): Parsing model.towers.base_towers_sita_2019
2019-08-26 10:41:41,735 (MainThread): Acquiring new postgres connection "base_towers_sita_2019".
2019-08-26 10:41:41,735 (MainThread): Re-using an available connection from the pool (formerly towers_transform_sita2009_buffers).
2019-08-26 10:41:41,738 (MainThread): Parsing model.towers.base_towers_spgmi_2018
2019-08-26 10:41:41,739 (MainThread): Acquiring new postgres connection "base_towers_spgmi_2018".
2019-08-26 10:41:41,739 (MainThread): Re-using an available connection from the pool (formerly base_towers_sita_2019).
2019-08-26 10:41:41,742 (MainThread): Parsing model.towers.base_towers_sita_2009
2019-08-26 10:41:41,743 (MainThread): Acquiring new postgres connection "base_towers_sita_2009".
2019-08-26 10:41:41,743 (MainThread): Re-using an available connection from the pool (formerly base_towers_spgmi_2018).
2019-08-26 10:41:41,747 (MainThread): Parsing analysis.towers.morphology_spgmi_2018
2019-08-26 10:41:41,748 (MainThread): Acquiring new postgres connection "morphology_spgmi_2018".
2019-08-26 10:41:41,748 (MainThread): Re-using an available connection from the pool (formerly base_towers_sita_2009).
2019-08-26 10:41:41,780 (MainThread): Found 10 models, 0 tests, 0 snapshots, 1 analyses, 116 macros, 0 operations, 0 seed files, 0 sources
2019-08-26 10:41:41,781 (MainThread):
2019-08-26 10:41:41,781 (MainThread): Acquiring new postgres connection "master".
2019-08-26 10:41:41,781 (MainThread): Re-using an available connection from the pool (formerly morphology_spgmi_2018).
2019-08-26 10:41:41,792 (MainThread): Parsing macros/core.sql
2019-08-26 10:41:41,796 (MainThread): Parsing macros/materializations/helpers.sql
2019-08-26 10:41:41,802 (MainThread): Parsing macros/materializations/snapshot/snapshot_merge.sql
2019-08-26 10:41:41,804 (MainThread): Parsing macros/materializations/snapshot/strategies.sql
2019-08-26 10:41:41,815 (MainThread): Parsing macros/materializations/snapshot/snapshot.sql
2019-08-26 10:41:41,833 (MainThread): Parsing macros/materializations/seed/seed.sql
2019-08-26 10:41:41,846 (MainThread): Parsing macros/materializations/incremental/incremental.sql
2019-08-26 10:41:41,853 (MainThread): Parsing macros/materializations/common/merge.sql
2019-08-26 10:41:41,860 (MainThread): Parsing macros/materializations/table/table.sql
2019-08-26 10:41:41,865 (MainThread): Parsing macros/materializations/view/view.sql
2019-08-26 10:41:41,869 (MainThread): Parsing macros/materializations/view/create_or_replace_view.sql
2019-08-26 10:41:41,874 (MainThread): Parsing macros/etc/get_custom_alias.sql
2019-08-26 10:41:41,875 (MainThread): Parsing macros/etc/query.sql
2019-08-26 10:41:41,876 (MainThread): Parsing macros/etc/is_incremental.sql
2019-08-26 10:41:41,877 (MainThread): Parsing macros/etc/get_relation_comment.sql
2019-08-26 10:41:41,880 (MainThread): Parsing macros/etc/datetime.sql
2019-08-26 10:41:41,887 (MainThread): Parsing macros/etc/get_custom_schema.sql
2019-08-26 10:41:41,889 (MainThread): Parsing macros/adapters/common.sql
2019-08-26 10:41:41,918 (MainThread): Parsing macros/schema_tests/relationships.sql
2019-08-26 10:41:41,919 (MainThread): Parsing macros/schema_tests/not_null.sql
2019-08-26 10:41:41,920 (MainThread): Parsing macros/schema_tests/unique.sql
2019-08-26 10:41:41,921 (MainThread): Parsing macros/schema_tests/accepted_values.sql
2019-08-26 10:41:41,923 (MainThread): Parsing macros/catalog.sql
2019-08-26 10:41:41,926 (MainThread): Parsing macros/relations.sql
2019-08-26 10:41:41,927 (MainThread): Parsing macros/adapters.sql
2019-08-26 10:41:41,936 (MainThread): Parsing macros/materializations/snapshot_merge.sql
2019-08-26 10:41:42,036 (MainThread): Using postgres connection "master".
2019-08-26 10:41:42,036 (MainThread): On master:
select distinct nspname from pg_namespace
2019-08-26 10:41:42,119 (MainThread): SQL status: SELECT 370 in 0.08 seconds
2019-08-26 10:41:42,148 (MainThread): Using postgres connection "master".
2019-08-26 10:41:42,148 (MainThread): On master: BEGIN
2019-08-26 10:41:42,194 (MainThread): SQL status: BEGIN in 0.05 seconds
2019-08-26 10:41:42,195 (MainThread): Using postgres connection "master".
2019-08-26 10:41:42,195 (MainThread): On master: select
'reference' as database,
tablename as name,
schemaname as schema,
'table' as type
from pg_tables
where schemaname ilike 'tlee'
union all
select
'reference' as database,
viewname as name,
schemaname as schema,
'view' as type
from pg_views
where schemaname ilike 'tlee'
2019-08-26 10:41:42,314 (MainThread): SQL status: SELECT 261 in 0.12 seconds
2019-08-26 10:41:43,070 (MainThread): Using postgres connection "master".
2019-08-26 10:41:43,070 (MainThread): On master: --
--
with relation as (
select
pg_rewrite.ev_class as class,
pg_rewrite.oid as id
from pg_rewrite
),
class as (
select
oid as id,
relname as name,
relnamespace as schema,
relkind as kind
from pg_class
),
dependency as (
select
pg_depend.objid as id,
pg_depend.refobjid as ref
from pg_depend
),
schema as (
select
pg_namespace.oid as id,
pg_namespace.nspname as name
from pg_namespace
where nspname != 'information_schema' and nspname not like 'pg_%'
),
referenced as (
select
relation.id AS id,
referenced_class.name ,
referenced_class.schema ,
referenced_class.kind
from relation
join class as referenced_class on relation.class=referenced_class.id
where referenced_class.kind in ('r', 'v')
),
relationships as (
select
referenced.name as referenced_name,
referenced.schema as referenced_schema_id,
dependent_class.name as dependent_name,
dependent_class.schema as dependent_schema_id,
referenced.kind as kind
from referenced
join dependency on referenced.id=dependency.id
join class as dependent_class on dependency.ref=dependent_class.id
where
(referenced.name != dependent_class.name or
referenced.schema != dependent_class.schema)
)
select
referenced_schema.name as referenced_schema,
relationships.referenced_name as referenced_name,
dependent_schema.name as dependent_schema,
relationships.dependent_name as dependent_name
from relationships
join schema as dependent_schema on relationships.dependent_schema_id=dependent_schema.id
join schema as referenced_schema on relationships.referenced_schema_id=referenced_schema.id
group by referenced_schema, referenced_name, dependent_schema, dependent_name
order by referenced_schema, referenced_name, dependent_schema, dependent_name;
2019-08-26 10:41:46,706 (MainThread): SQL status: SELECT 49106 in 3.64 seconds
2019-08-26 10:42:04,957 (MainThread): On master: ROLLBACK
2019-08-26 10:42:04,995 (MainThread): Connection 'master' was left open.
2019-08-26 10:42:04,996 (MainThread): On master: Close
2019-08-26 10:42:04,997 (MainThread): Sending event: {'category': 'dbt', 'action': 'invocation', 'label': 'end', 'context': [<snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10edab110>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x1109d8910>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x1109d8cd0>]}
2019-08-26 10:42:05,196 (MainThread): Flushing usage events
2019-08-26 10:42:05,196 (MainThread): Encountered an error:
2019-08-26 10:42:05,196 (MainThread): Cache inconsistency detected: in add_link, referenced link key _ReferenceKey(database='reference', schema='tlee', identifier='cpp009_business_spend') not in cache!
2019-08-26 10:42:05,198 (MainThread): Traceback (most recent call last):
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/main.py", line 82, in main
results, succeeded = handle_and_check(args)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/main.py", line 151, in handle_and_check
task, res = run_from_args(parsed)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/main.py", line 216, in run_from_args
results = task.run()
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/task/runnable.py", line 282, in run
result = self.execute_with_hooks(selected_uids)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/task/runnable.py", line 252, in execute_with_hooks
self.before_run(adapter, selected_uids)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/task/run.py", line 150, in before_run
self.populate_adapter_cache(adapter)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/task/run.py", line 63, in populate_adapter_cache
adapter.set_relations_cache(self.manifest)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/base/impl.py", line 342, in set_relations_cache
self._relations_cache_for_schemas(manifest)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/postgres/impl.py", line 88, in _relations_cache_for_schemas
self._link_cached_relations(manifest)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/postgres/impl.py", line 84, in _link_cached_relations
self._link_cached_database_relations(schemas)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/postgres/impl.py", line 60, in _link_cached_database_relations
self.cache.add_link(dependent, referenced)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/cache.py", line 296, in add_link
self._add_link(referenced, dependent)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/cache.py", line 254, in _add_link
.format(referenced_key)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/exceptions.py", line 473, in raise_cache_inconsistent
raise InternalException('Cache inconsistency detected: {}'.format(message))
dbt.exceptions.InternalException: Cache inconsistency detected: in add_link, referenced link key _ReferenceKey(database='reference', schema='tlee', identifier='cpp009_business_spend') not in cache!
|
dbt.exceptions.InternalException
|
def External(cls) -> str:
return str(RelationType.External)
|
def External(self) -> str:
return str(RelationType.External)
|
https://github.com/fishtown-analytics/dbt/issues/1698
|
dbt --debug run
Running with dbt=0.14.0
2019-08-26 10:41:40,953 (MainThread): Tracking: tracking
2019-08-26 10:41:40,959 (MainThread): Sending event: {'category': 'dbt', 'action': 'invocation', 'label': 'start', 'context': [<snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10eda3bd0>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10edae510>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10eda3b90>]}
2019-08-26 10:41:41,213 (MainThread): Parsing macros/core.sql
2019-08-26 10:41:41,219 (MainThread): Parsing macros/materializations/helpers.sql
2019-08-26 10:41:41,227 (MainThread): Parsing macros/materializations/snapshot/snapshot_merge.sql
2019-08-26 10:41:41,229 (MainThread): Parsing macros/materializations/snapshot/strategies.sql
2019-08-26 10:41:41,242 (MainThread): Parsing macros/materializations/snapshot/snapshot.sql
2019-08-26 10:41:41,261 (MainThread): Parsing macros/materializations/seed/seed.sql
2019-08-26 10:41:41,275 (MainThread): Parsing macros/materializations/incremental/incremental.sql
2019-08-26 10:41:41,282 (MainThread): Parsing macros/materializations/common/merge.sql
2019-08-26 10:41:41,288 (MainThread): Parsing macros/materializations/table/table.sql
2019-08-26 10:41:41,293 (MainThread): Parsing macros/materializations/view/view.sql
2019-08-26 10:41:41,298 (MainThread): Parsing macros/materializations/view/create_or_replace_view.sql
2019-08-26 10:41:41,303 (MainThread): Parsing macros/etc/get_custom_alias.sql
2019-08-26 10:41:41,304 (MainThread): Parsing macros/etc/query.sql
2019-08-26 10:41:41,305 (MainThread): Parsing macros/etc/is_incremental.sql
2019-08-26 10:41:41,307 (MainThread): Parsing macros/etc/get_relation_comment.sql
2019-08-26 10:41:41,309 (MainThread): Parsing macros/etc/datetime.sql
2019-08-26 10:41:41,317 (MainThread): Parsing macros/etc/get_custom_schema.sql
2019-08-26 10:41:41,319 (MainThread): Parsing macros/adapters/common.sql
2019-08-26 10:41:41,348 (MainThread): Parsing macros/schema_tests/relationships.sql
2019-08-26 10:41:41,350 (MainThread): Parsing macros/schema_tests/not_null.sql
2019-08-26 10:41:41,351 (MainThread): Parsing macros/schema_tests/unique.sql
2019-08-26 10:41:41,352 (MainThread): Parsing macros/schema_tests/accepted_values.sql
2019-08-26 10:41:41,354 (MainThread): Parsing macros/catalog.sql
2019-08-26 10:41:41,356 (MainThread): Parsing macros/relations.sql
2019-08-26 10:41:41,357 (MainThread): Parsing macros/adapters.sql
2019-08-26 10:41:41,367 (MainThread): Parsing macros/materializations/snapshot_merge.sql
2019-08-26 10:41:41,380 (MainThread): Parsing model.towers.distance_sita2019_spgmi2018
2019-08-26 10:41:41,381 (MainThread): Acquiring new postgres connection "distance_sita2019_spgmi2018".
2019-08-26 10:41:41,381 (MainThread): Opening a new connection, currently in state init
2019-08-26 10:41:41,698 (MainThread): Parsing model.towers.distance_spgmi2018_sita2019
2019-08-26 10:41:41,699 (MainThread): Acquiring new postgres connection "distance_spgmi2018_sita2019".
2019-08-26 10:41:41,699 (MainThread): Re-using an available connection from the pool (formerly distance_sita2019_spgmi2018).
2019-08-26 10:41:41,705 (MainThread): Parsing model.towers.towers_spgmisita100m_20180701
2019-08-26 10:41:41,706 (MainThread): Acquiring new postgres connection "towers_spgmisita100m_20180701".
2019-08-26 10:41:41,706 (MainThread): Re-using an available connection from the pool (formerly distance_spgmi2018_sita2019).
2019-08-26 10:41:41,710 (MainThread): Parsing model.towers.towers_transform_sita2009_nearspgmi
2019-08-26 10:41:41,711 (MainThread): Acquiring new postgres connection "towers_transform_sita2009_nearspgmi".
2019-08-26 10:41:41,711 (MainThread): Re-using an available connection from the pool (formerly towers_spgmisita100m_20180701).
2019-08-26 10:41:41,716 (MainThread): Parsing model.towers.towers_transform_spgmi2018_buffers
2019-08-26 10:41:41,717 (MainThread): Acquiring new postgres connection "towers_transform_spgmi2018_buffers".
2019-08-26 10:41:41,717 (MainThread): Re-using an available connection from the pool (formerly towers_transform_sita2009_nearspgmi).
2019-08-26 10:41:41,723 (MainThread): Parsing model.towers.towers_transform_sita2019_buffers
2019-08-26 10:41:41,723 (MainThread): Acquiring new postgres connection "towers_transform_sita2019_buffers".
2019-08-26 10:41:41,724 (MainThread): Re-using an available connection from the pool (formerly towers_transform_spgmi2018_buffers).
2019-08-26 10:41:41,729 (MainThread): Parsing model.towers.towers_transform_sita2009_buffers
2019-08-26 10:41:41,729 (MainThread): Acquiring new postgres connection "towers_transform_sita2009_buffers".
2019-08-26 10:41:41,729 (MainThread): Re-using an available connection from the pool (formerly towers_transform_sita2019_buffers).
2019-08-26 10:41:41,734 (MainThread): Parsing model.towers.base_towers_sita_2019
2019-08-26 10:41:41,735 (MainThread): Acquiring new postgres connection "base_towers_sita_2019".
2019-08-26 10:41:41,735 (MainThread): Re-using an available connection from the pool (formerly towers_transform_sita2009_buffers).
2019-08-26 10:41:41,738 (MainThread): Parsing model.towers.base_towers_spgmi_2018
2019-08-26 10:41:41,739 (MainThread): Acquiring new postgres connection "base_towers_spgmi_2018".
2019-08-26 10:41:41,739 (MainThread): Re-using an available connection from the pool (formerly base_towers_sita_2019).
2019-08-26 10:41:41,742 (MainThread): Parsing model.towers.base_towers_sita_2009
2019-08-26 10:41:41,743 (MainThread): Acquiring new postgres connection "base_towers_sita_2009".
2019-08-26 10:41:41,743 (MainThread): Re-using an available connection from the pool (formerly base_towers_spgmi_2018).
2019-08-26 10:41:41,747 (MainThread): Parsing analysis.towers.morphology_spgmi_2018
2019-08-26 10:41:41,748 (MainThread): Acquiring new postgres connection "morphology_spgmi_2018".
2019-08-26 10:41:41,748 (MainThread): Re-using an available connection from the pool (formerly base_towers_sita_2009).
2019-08-26 10:41:41,780 (MainThread): Found 10 models, 0 tests, 0 snapshots, 1 analyses, 116 macros, 0 operations, 0 seed files, 0 sources
2019-08-26 10:41:41,781 (MainThread):
2019-08-26 10:41:41,781 (MainThread): Acquiring new postgres connection "master".
2019-08-26 10:41:41,781 (MainThread): Re-using an available connection from the pool (formerly morphology_spgmi_2018).
2019-08-26 10:41:41,792 (MainThread): Parsing macros/core.sql
2019-08-26 10:41:41,796 (MainThread): Parsing macros/materializations/helpers.sql
2019-08-26 10:41:41,802 (MainThread): Parsing macros/materializations/snapshot/snapshot_merge.sql
2019-08-26 10:41:41,804 (MainThread): Parsing macros/materializations/snapshot/strategies.sql
2019-08-26 10:41:41,815 (MainThread): Parsing macros/materializations/snapshot/snapshot.sql
2019-08-26 10:41:41,833 (MainThread): Parsing macros/materializations/seed/seed.sql
2019-08-26 10:41:41,846 (MainThread): Parsing macros/materializations/incremental/incremental.sql
2019-08-26 10:41:41,853 (MainThread): Parsing macros/materializations/common/merge.sql
2019-08-26 10:41:41,860 (MainThread): Parsing macros/materializations/table/table.sql
2019-08-26 10:41:41,865 (MainThread): Parsing macros/materializations/view/view.sql
2019-08-26 10:41:41,869 (MainThread): Parsing macros/materializations/view/create_or_replace_view.sql
2019-08-26 10:41:41,874 (MainThread): Parsing macros/etc/get_custom_alias.sql
2019-08-26 10:41:41,875 (MainThread): Parsing macros/etc/query.sql
2019-08-26 10:41:41,876 (MainThread): Parsing macros/etc/is_incremental.sql
2019-08-26 10:41:41,877 (MainThread): Parsing macros/etc/get_relation_comment.sql
2019-08-26 10:41:41,880 (MainThread): Parsing macros/etc/datetime.sql
2019-08-26 10:41:41,887 (MainThread): Parsing macros/etc/get_custom_schema.sql
2019-08-26 10:41:41,889 (MainThread): Parsing macros/adapters/common.sql
2019-08-26 10:41:41,918 (MainThread): Parsing macros/schema_tests/relationships.sql
2019-08-26 10:41:41,919 (MainThread): Parsing macros/schema_tests/not_null.sql
2019-08-26 10:41:41,920 (MainThread): Parsing macros/schema_tests/unique.sql
2019-08-26 10:41:41,921 (MainThread): Parsing macros/schema_tests/accepted_values.sql
2019-08-26 10:41:41,923 (MainThread): Parsing macros/catalog.sql
2019-08-26 10:41:41,926 (MainThread): Parsing macros/relations.sql
2019-08-26 10:41:41,927 (MainThread): Parsing macros/adapters.sql
2019-08-26 10:41:41,936 (MainThread): Parsing macros/materializations/snapshot_merge.sql
2019-08-26 10:41:42,036 (MainThread): Using postgres connection "master".
2019-08-26 10:41:42,036 (MainThread): On master:
select distinct nspname from pg_namespace
2019-08-26 10:41:42,119 (MainThread): SQL status: SELECT 370 in 0.08 seconds
2019-08-26 10:41:42,148 (MainThread): Using postgres connection "master".
2019-08-26 10:41:42,148 (MainThread): On master: BEGIN
2019-08-26 10:41:42,194 (MainThread): SQL status: BEGIN in 0.05 seconds
2019-08-26 10:41:42,195 (MainThread): Using postgres connection "master".
2019-08-26 10:41:42,195 (MainThread): On master: select
'reference' as database,
tablename as name,
schemaname as schema,
'table' as type
from pg_tables
where schemaname ilike 'tlee'
union all
select
'reference' as database,
viewname as name,
schemaname as schema,
'view' as type
from pg_views
where schemaname ilike 'tlee'
2019-08-26 10:41:42,314 (MainThread): SQL status: SELECT 261 in 0.12 seconds
2019-08-26 10:41:43,070 (MainThread): Using postgres connection "master".
2019-08-26 10:41:43,070 (MainThread): On master: --
--
with relation as (
select
pg_rewrite.ev_class as class,
pg_rewrite.oid as id
from pg_rewrite
),
class as (
select
oid as id,
relname as name,
relnamespace as schema,
relkind as kind
from pg_class
),
dependency as (
select
pg_depend.objid as id,
pg_depend.refobjid as ref
from pg_depend
),
schema as (
select
pg_namespace.oid as id,
pg_namespace.nspname as name
from pg_namespace
where nspname != 'information_schema' and nspname not like 'pg_%'
),
referenced as (
select
relation.id AS id,
referenced_class.name ,
referenced_class.schema ,
referenced_class.kind
from relation
join class as referenced_class on relation.class=referenced_class.id
where referenced_class.kind in ('r', 'v')
),
relationships as (
select
referenced.name as referenced_name,
referenced.schema as referenced_schema_id,
dependent_class.name as dependent_name,
dependent_class.schema as dependent_schema_id,
referenced.kind as kind
from referenced
join dependency on referenced.id=dependency.id
join class as dependent_class on dependency.ref=dependent_class.id
where
(referenced.name != dependent_class.name or
referenced.schema != dependent_class.schema)
)
select
referenced_schema.name as referenced_schema,
relationships.referenced_name as referenced_name,
dependent_schema.name as dependent_schema,
relationships.dependent_name as dependent_name
from relationships
join schema as dependent_schema on relationships.dependent_schema_id=dependent_schema.id
join schema as referenced_schema on relationships.referenced_schema_id=referenced_schema.id
group by referenced_schema, referenced_name, dependent_schema, dependent_name
order by referenced_schema, referenced_name, dependent_schema, dependent_name;
2019-08-26 10:41:46,706 (MainThread): SQL status: SELECT 49106 in 3.64 seconds
2019-08-26 10:42:04,957 (MainThread): On master: ROLLBACK
2019-08-26 10:42:04,995 (MainThread): Connection 'master' was left open.
2019-08-26 10:42:04,996 (MainThread): On master: Close
2019-08-26 10:42:04,997 (MainThread): Sending event: {'category': 'dbt', 'action': 'invocation', 'label': 'end', 'context': [<snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10edab110>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x1109d8910>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x1109d8cd0>]}
2019-08-26 10:42:05,196 (MainThread): Flushing usage events
2019-08-26 10:42:05,196 (MainThread): Encountered an error:
2019-08-26 10:42:05,196 (MainThread): Cache inconsistency detected: in add_link, referenced link key _ReferenceKey(database='reference', schema='tlee', identifier='cpp009_business_spend') not in cache!
2019-08-26 10:42:05,198 (MainThread): Traceback (most recent call last):
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/main.py", line 82, in main
results, succeeded = handle_and_check(args)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/main.py", line 151, in handle_and_check
task, res = run_from_args(parsed)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/main.py", line 216, in run_from_args
results = task.run()
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/task/runnable.py", line 282, in run
result = self.execute_with_hooks(selected_uids)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/task/runnable.py", line 252, in execute_with_hooks
self.before_run(adapter, selected_uids)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/task/run.py", line 150, in before_run
self.populate_adapter_cache(adapter)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/task/run.py", line 63, in populate_adapter_cache
adapter.set_relations_cache(self.manifest)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/base/impl.py", line 342, in set_relations_cache
self._relations_cache_for_schemas(manifest)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/postgres/impl.py", line 88, in _relations_cache_for_schemas
self._link_cached_relations(manifest)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/postgres/impl.py", line 84, in _link_cached_relations
self._link_cached_database_relations(schemas)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/postgres/impl.py", line 60, in _link_cached_database_relations
self.cache.add_link(dependent, referenced)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/cache.py", line 296, in add_link
self._add_link(referenced, dependent)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/cache.py", line 254, in _add_link
.format(referenced_key)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/exceptions.py", line 473, in raise_cache_inconsistent
raise InternalException('Cache inconsistency detected: {}'.format(message))
dbt.exceptions.InternalException: Cache inconsistency detected: in add_link, referenced link key _ReferenceKey(database='reference', schema='tlee', identifier='cpp009_business_spend') not in cache!
|
dbt.exceptions.InternalException
|
def _add_link(self, referenced_key, dependent_key):
"""Add a link between two relations to the database. Both the old and
new entries must alraedy exist in the database.
:param _ReferenceKey referenced_key: The key identifying the referenced
model (the one that if dropped will drop the dependent model).
:param _ReferenceKey dependent_key: The key identifying the dependent
model.
:raises InternalError: If either entry does not exist.
"""
referenced = self.relations.get(referenced_key)
if referenced is None:
return
if referenced is None:
dbt.exceptions.raise_cache_inconsistent(
"in add_link, referenced link key {} not in cache!".format(referenced_key)
)
dependent = self.relations.get(dependent_key)
if dependent is None:
dbt.exceptions.raise_cache_inconsistent(
"in add_link, dependent link key {} not in cache!".format(dependent_key)
)
referenced.add_reference(dependent)
|
def _add_link(self, referenced_key, dependent_key):
"""Add a link between two relations to the database. Both the old and
new entries must alraedy exist in the database.
:param _ReferenceKey referenced_key: The key identifying the referenced
model (the one that if dropped will drop the dependent model).
:param _ReferenceKey dependent_key: The key identifying the dependent
model.
:raises InternalError: If either entry does not exist.
"""
referenced = self.relations.get(referenced_key)
if referenced is None:
dbt.exceptions.raise_cache_inconsistent(
"in add_link, referenced link key {} not in cache!".format(referenced_key)
)
dependent = self.relations.get(dependent_key)
if dependent is None:
dbt.exceptions.raise_cache_inconsistent(
"in add_link, dependent link key {} not in cache!".format(dependent_key)
)
referenced.add_reference(dependent)
|
https://github.com/fishtown-analytics/dbt/issues/1698
|
dbt --debug run
Running with dbt=0.14.0
2019-08-26 10:41:40,953 (MainThread): Tracking: tracking
2019-08-26 10:41:40,959 (MainThread): Sending event: {'category': 'dbt', 'action': 'invocation', 'label': 'start', 'context': [<snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10eda3bd0>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10edae510>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10eda3b90>]}
2019-08-26 10:41:41,213 (MainThread): Parsing macros/core.sql
2019-08-26 10:41:41,219 (MainThread): Parsing macros/materializations/helpers.sql
2019-08-26 10:41:41,227 (MainThread): Parsing macros/materializations/snapshot/snapshot_merge.sql
2019-08-26 10:41:41,229 (MainThread): Parsing macros/materializations/snapshot/strategies.sql
2019-08-26 10:41:41,242 (MainThread): Parsing macros/materializations/snapshot/snapshot.sql
2019-08-26 10:41:41,261 (MainThread): Parsing macros/materializations/seed/seed.sql
2019-08-26 10:41:41,275 (MainThread): Parsing macros/materializations/incremental/incremental.sql
2019-08-26 10:41:41,282 (MainThread): Parsing macros/materializations/common/merge.sql
2019-08-26 10:41:41,288 (MainThread): Parsing macros/materializations/table/table.sql
2019-08-26 10:41:41,293 (MainThread): Parsing macros/materializations/view/view.sql
2019-08-26 10:41:41,298 (MainThread): Parsing macros/materializations/view/create_or_replace_view.sql
2019-08-26 10:41:41,303 (MainThread): Parsing macros/etc/get_custom_alias.sql
2019-08-26 10:41:41,304 (MainThread): Parsing macros/etc/query.sql
2019-08-26 10:41:41,305 (MainThread): Parsing macros/etc/is_incremental.sql
2019-08-26 10:41:41,307 (MainThread): Parsing macros/etc/get_relation_comment.sql
2019-08-26 10:41:41,309 (MainThread): Parsing macros/etc/datetime.sql
2019-08-26 10:41:41,317 (MainThread): Parsing macros/etc/get_custom_schema.sql
2019-08-26 10:41:41,319 (MainThread): Parsing macros/adapters/common.sql
2019-08-26 10:41:41,348 (MainThread): Parsing macros/schema_tests/relationships.sql
2019-08-26 10:41:41,350 (MainThread): Parsing macros/schema_tests/not_null.sql
2019-08-26 10:41:41,351 (MainThread): Parsing macros/schema_tests/unique.sql
2019-08-26 10:41:41,352 (MainThread): Parsing macros/schema_tests/accepted_values.sql
2019-08-26 10:41:41,354 (MainThread): Parsing macros/catalog.sql
2019-08-26 10:41:41,356 (MainThread): Parsing macros/relations.sql
2019-08-26 10:41:41,357 (MainThread): Parsing macros/adapters.sql
2019-08-26 10:41:41,367 (MainThread): Parsing macros/materializations/snapshot_merge.sql
2019-08-26 10:41:41,380 (MainThread): Parsing model.towers.distance_sita2019_spgmi2018
2019-08-26 10:41:41,381 (MainThread): Acquiring new postgres connection "distance_sita2019_spgmi2018".
2019-08-26 10:41:41,381 (MainThread): Opening a new connection, currently in state init
2019-08-26 10:41:41,698 (MainThread): Parsing model.towers.distance_spgmi2018_sita2019
2019-08-26 10:41:41,699 (MainThread): Acquiring new postgres connection "distance_spgmi2018_sita2019".
2019-08-26 10:41:41,699 (MainThread): Re-using an available connection from the pool (formerly distance_sita2019_spgmi2018).
2019-08-26 10:41:41,705 (MainThread): Parsing model.towers.towers_spgmisita100m_20180701
2019-08-26 10:41:41,706 (MainThread): Acquiring new postgres connection "towers_spgmisita100m_20180701".
2019-08-26 10:41:41,706 (MainThread): Re-using an available connection from the pool (formerly distance_spgmi2018_sita2019).
2019-08-26 10:41:41,710 (MainThread): Parsing model.towers.towers_transform_sita2009_nearspgmi
2019-08-26 10:41:41,711 (MainThread): Acquiring new postgres connection "towers_transform_sita2009_nearspgmi".
2019-08-26 10:41:41,711 (MainThread): Re-using an available connection from the pool (formerly towers_spgmisita100m_20180701).
2019-08-26 10:41:41,716 (MainThread): Parsing model.towers.towers_transform_spgmi2018_buffers
2019-08-26 10:41:41,717 (MainThread): Acquiring new postgres connection "towers_transform_spgmi2018_buffers".
2019-08-26 10:41:41,717 (MainThread): Re-using an available connection from the pool (formerly towers_transform_sita2009_nearspgmi).
2019-08-26 10:41:41,723 (MainThread): Parsing model.towers.towers_transform_sita2019_buffers
2019-08-26 10:41:41,723 (MainThread): Acquiring new postgres connection "towers_transform_sita2019_buffers".
2019-08-26 10:41:41,724 (MainThread): Re-using an available connection from the pool (formerly towers_transform_spgmi2018_buffers).
2019-08-26 10:41:41,729 (MainThread): Parsing model.towers.towers_transform_sita2009_buffers
2019-08-26 10:41:41,729 (MainThread): Acquiring new postgres connection "towers_transform_sita2009_buffers".
2019-08-26 10:41:41,729 (MainThread): Re-using an available connection from the pool (formerly towers_transform_sita2019_buffers).
2019-08-26 10:41:41,734 (MainThread): Parsing model.towers.base_towers_sita_2019
2019-08-26 10:41:41,735 (MainThread): Acquiring new postgres connection "base_towers_sita_2019".
2019-08-26 10:41:41,735 (MainThread): Re-using an available connection from the pool (formerly towers_transform_sita2009_buffers).
2019-08-26 10:41:41,738 (MainThread): Parsing model.towers.base_towers_spgmi_2018
2019-08-26 10:41:41,739 (MainThread): Acquiring new postgres connection "base_towers_spgmi_2018".
2019-08-26 10:41:41,739 (MainThread): Re-using an available connection from the pool (formerly base_towers_sita_2019).
2019-08-26 10:41:41,742 (MainThread): Parsing model.towers.base_towers_sita_2009
2019-08-26 10:41:41,743 (MainThread): Acquiring new postgres connection "base_towers_sita_2009".
2019-08-26 10:41:41,743 (MainThread): Re-using an available connection from the pool (formerly base_towers_spgmi_2018).
2019-08-26 10:41:41,747 (MainThread): Parsing analysis.towers.morphology_spgmi_2018
2019-08-26 10:41:41,748 (MainThread): Acquiring new postgres connection "morphology_spgmi_2018".
2019-08-26 10:41:41,748 (MainThread): Re-using an available connection from the pool (formerly base_towers_sita_2009).
2019-08-26 10:41:41,780 (MainThread): Found 10 models, 0 tests, 0 snapshots, 1 analyses, 116 macros, 0 operations, 0 seed files, 0 sources
2019-08-26 10:41:41,781 (MainThread):
2019-08-26 10:41:41,781 (MainThread): Acquiring new postgres connection "master".
2019-08-26 10:41:41,781 (MainThread): Re-using an available connection from the pool (formerly morphology_spgmi_2018).
2019-08-26 10:41:41,792 (MainThread): Parsing macros/core.sql
2019-08-26 10:41:41,796 (MainThread): Parsing macros/materializations/helpers.sql
2019-08-26 10:41:41,802 (MainThread): Parsing macros/materializations/snapshot/snapshot_merge.sql
2019-08-26 10:41:41,804 (MainThread): Parsing macros/materializations/snapshot/strategies.sql
2019-08-26 10:41:41,815 (MainThread): Parsing macros/materializations/snapshot/snapshot.sql
2019-08-26 10:41:41,833 (MainThread): Parsing macros/materializations/seed/seed.sql
2019-08-26 10:41:41,846 (MainThread): Parsing macros/materializations/incremental/incremental.sql
2019-08-26 10:41:41,853 (MainThread): Parsing macros/materializations/common/merge.sql
2019-08-26 10:41:41,860 (MainThread): Parsing macros/materializations/table/table.sql
2019-08-26 10:41:41,865 (MainThread): Parsing macros/materializations/view/view.sql
2019-08-26 10:41:41,869 (MainThread): Parsing macros/materializations/view/create_or_replace_view.sql
2019-08-26 10:41:41,874 (MainThread): Parsing macros/etc/get_custom_alias.sql
2019-08-26 10:41:41,875 (MainThread): Parsing macros/etc/query.sql
2019-08-26 10:41:41,876 (MainThread): Parsing macros/etc/is_incremental.sql
2019-08-26 10:41:41,877 (MainThread): Parsing macros/etc/get_relation_comment.sql
2019-08-26 10:41:41,880 (MainThread): Parsing macros/etc/datetime.sql
2019-08-26 10:41:41,887 (MainThread): Parsing macros/etc/get_custom_schema.sql
2019-08-26 10:41:41,889 (MainThread): Parsing macros/adapters/common.sql
2019-08-26 10:41:41,918 (MainThread): Parsing macros/schema_tests/relationships.sql
2019-08-26 10:41:41,919 (MainThread): Parsing macros/schema_tests/not_null.sql
2019-08-26 10:41:41,920 (MainThread): Parsing macros/schema_tests/unique.sql
2019-08-26 10:41:41,921 (MainThread): Parsing macros/schema_tests/accepted_values.sql
2019-08-26 10:41:41,923 (MainThread): Parsing macros/catalog.sql
2019-08-26 10:41:41,926 (MainThread): Parsing macros/relations.sql
2019-08-26 10:41:41,927 (MainThread): Parsing macros/adapters.sql
2019-08-26 10:41:41,936 (MainThread): Parsing macros/materializations/snapshot_merge.sql
2019-08-26 10:41:42,036 (MainThread): Using postgres connection "master".
2019-08-26 10:41:42,036 (MainThread): On master:
select distinct nspname from pg_namespace
2019-08-26 10:41:42,119 (MainThread): SQL status: SELECT 370 in 0.08 seconds
2019-08-26 10:41:42,148 (MainThread): Using postgres connection "master".
2019-08-26 10:41:42,148 (MainThread): On master: BEGIN
2019-08-26 10:41:42,194 (MainThread): SQL status: BEGIN in 0.05 seconds
2019-08-26 10:41:42,195 (MainThread): Using postgres connection "master".
2019-08-26 10:41:42,195 (MainThread): On master: select
'reference' as database,
tablename as name,
schemaname as schema,
'table' as type
from pg_tables
where schemaname ilike 'tlee'
union all
select
'reference' as database,
viewname as name,
schemaname as schema,
'view' as type
from pg_views
where schemaname ilike 'tlee'
2019-08-26 10:41:42,314 (MainThread): SQL status: SELECT 261 in 0.12 seconds
2019-08-26 10:41:43,070 (MainThread): Using postgres connection "master".
2019-08-26 10:41:43,070 (MainThread): On master: --
--
with relation as (
select
pg_rewrite.ev_class as class,
pg_rewrite.oid as id
from pg_rewrite
),
class as (
select
oid as id,
relname as name,
relnamespace as schema,
relkind as kind
from pg_class
),
dependency as (
select
pg_depend.objid as id,
pg_depend.refobjid as ref
from pg_depend
),
schema as (
select
pg_namespace.oid as id,
pg_namespace.nspname as name
from pg_namespace
where nspname != 'information_schema' and nspname not like 'pg_%'
),
referenced as (
select
relation.id AS id,
referenced_class.name ,
referenced_class.schema ,
referenced_class.kind
from relation
join class as referenced_class on relation.class=referenced_class.id
where referenced_class.kind in ('r', 'v')
),
relationships as (
select
referenced.name as referenced_name,
referenced.schema as referenced_schema_id,
dependent_class.name as dependent_name,
dependent_class.schema as dependent_schema_id,
referenced.kind as kind
from referenced
join dependency on referenced.id=dependency.id
join class as dependent_class on dependency.ref=dependent_class.id
where
(referenced.name != dependent_class.name or
referenced.schema != dependent_class.schema)
)
select
referenced_schema.name as referenced_schema,
relationships.referenced_name as referenced_name,
dependent_schema.name as dependent_schema,
relationships.dependent_name as dependent_name
from relationships
join schema as dependent_schema on relationships.dependent_schema_id=dependent_schema.id
join schema as referenced_schema on relationships.referenced_schema_id=referenced_schema.id
group by referenced_schema, referenced_name, dependent_schema, dependent_name
order by referenced_schema, referenced_name, dependent_schema, dependent_name;
2019-08-26 10:41:46,706 (MainThread): SQL status: SELECT 49106 in 3.64 seconds
2019-08-26 10:42:04,957 (MainThread): On master: ROLLBACK
2019-08-26 10:42:04,995 (MainThread): Connection 'master' was left open.
2019-08-26 10:42:04,996 (MainThread): On master: Close
2019-08-26 10:42:04,997 (MainThread): Sending event: {'category': 'dbt', 'action': 'invocation', 'label': 'end', 'context': [<snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10edab110>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x1109d8910>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x1109d8cd0>]}
2019-08-26 10:42:05,196 (MainThread): Flushing usage events
2019-08-26 10:42:05,196 (MainThread): Encountered an error:
2019-08-26 10:42:05,196 (MainThread): Cache inconsistency detected: in add_link, referenced link key _ReferenceKey(database='reference', schema='tlee', identifier='cpp009_business_spend') not in cache!
2019-08-26 10:42:05,198 (MainThread): Traceback (most recent call last):
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/main.py", line 82, in main
results, succeeded = handle_and_check(args)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/main.py", line 151, in handle_and_check
task, res = run_from_args(parsed)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/main.py", line 216, in run_from_args
results = task.run()
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/task/runnable.py", line 282, in run
result = self.execute_with_hooks(selected_uids)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/task/runnable.py", line 252, in execute_with_hooks
self.before_run(adapter, selected_uids)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/task/run.py", line 150, in before_run
self.populate_adapter_cache(adapter)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/task/run.py", line 63, in populate_adapter_cache
adapter.set_relations_cache(self.manifest)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/base/impl.py", line 342, in set_relations_cache
self._relations_cache_for_schemas(manifest)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/postgres/impl.py", line 88, in _relations_cache_for_schemas
self._link_cached_relations(manifest)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/postgres/impl.py", line 84, in _link_cached_relations
self._link_cached_database_relations(schemas)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/postgres/impl.py", line 60, in _link_cached_database_relations
self.cache.add_link(dependent, referenced)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/cache.py", line 296, in add_link
self._add_link(referenced, dependent)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/cache.py", line 254, in _add_link
.format(referenced_key)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/exceptions.py", line 473, in raise_cache_inconsistent
raise InternalException('Cache inconsistency detected: {}'.format(message))
dbt.exceptions.InternalException: Cache inconsistency detected: in add_link, referenced link key _ReferenceKey(database='reference', schema='tlee', identifier='cpp009_business_spend') not in cache!
|
dbt.exceptions.InternalException
|
def add_link(self, referenced, dependent):
"""Add a link between two relations to the database. If either relation
does not exist, it will be added as an "external" relation.
The dependent model refers _to_ the referenced model. So, given
arguments of (jake_test, bar, jake_test, foo):
both values are in the schema jake_test and foo is a view that refers
to bar, so "drop bar cascade" will drop foo and all of foo's
dependents.
:param BaseRelation referenced: The referenced model.
:param BaseRelation dependent: The dependent model.
:raises InternalError: If either entry does not exist.
"""
ref_key = _make_key(referenced)
if (ref_key.database, ref_key.schema) not in self:
# if we have not cached the referenced schema at all, we must be
# referring to a table outside our control. There's no need to make
# a link - we will never drop the referenced relation during a run.
logger.debug(
"{dep!s} references {ref!s} but {ref.database}.{ref.schema} "
"is not in the cache, skipping assumed external relation".format(
dep=dependent, ref=ref_key
)
)
return
if ref_key not in self.relations:
# Insert a dummy "external" relation.
referenced = referenced.replace(type=referenced.RelationType.External)
self.add(referenced)
dep_key = _make_key(dependent)
if dep_key not in self.relations:
# Insert a dummy "external" relation.
dependent = dependent.replace(type=referenced.RelationType.External)
self.add(dependent)
logger.debug("adding link, {!s} references {!s}".format(dep_key, ref_key))
with self.lock:
self._add_link(ref_key, dep_key)
|
def add_link(self, referenced, dependent):
"""Add a link between two relations to the database. Both the old and
new entries must already exist in the database.
The dependent model refers _to_ the referenced model. So, given
arguments of (jake_test, bar, jake_test, foo):
both values are in the schema jake_test and foo is a view that refers
to bar, so "drop bar cascade" will drop foo and all of foo's
dependents.
:param BaseRelation referenced: The referenced model.
:param BaseRelation dependent: The dependent model.
:raises InternalError: If either entry does not exist.
"""
referenced = _make_key(referenced)
if (referenced.database, referenced.schema) not in self:
# if we have not cached the referenced schema at all, we must be
# referring to a table outside our control. There's no need to make
# a link - we will never drop the referenced relation during a run.
logger.debug(
"{dep!s} references {ref!s} but {ref.database}.{ref.schema} "
"is not in the cache, skipping assumed external relation".format(
dep=dependent, ref=referenced
)
)
return
dependent = _make_key(dependent)
logger.debug("adding link, {!s} references {!s}".format(dependent, referenced))
with self.lock:
self._add_link(referenced, dependent)
|
https://github.com/fishtown-analytics/dbt/issues/1698
|
dbt --debug run
Running with dbt=0.14.0
2019-08-26 10:41:40,953 (MainThread): Tracking: tracking
2019-08-26 10:41:40,959 (MainThread): Sending event: {'category': 'dbt', 'action': 'invocation', 'label': 'start', 'context': [<snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10eda3bd0>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10edae510>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10eda3b90>]}
2019-08-26 10:41:41,213 (MainThread): Parsing macros/core.sql
2019-08-26 10:41:41,219 (MainThread): Parsing macros/materializations/helpers.sql
2019-08-26 10:41:41,227 (MainThread): Parsing macros/materializations/snapshot/snapshot_merge.sql
2019-08-26 10:41:41,229 (MainThread): Parsing macros/materializations/snapshot/strategies.sql
2019-08-26 10:41:41,242 (MainThread): Parsing macros/materializations/snapshot/snapshot.sql
2019-08-26 10:41:41,261 (MainThread): Parsing macros/materializations/seed/seed.sql
2019-08-26 10:41:41,275 (MainThread): Parsing macros/materializations/incremental/incremental.sql
2019-08-26 10:41:41,282 (MainThread): Parsing macros/materializations/common/merge.sql
2019-08-26 10:41:41,288 (MainThread): Parsing macros/materializations/table/table.sql
2019-08-26 10:41:41,293 (MainThread): Parsing macros/materializations/view/view.sql
2019-08-26 10:41:41,298 (MainThread): Parsing macros/materializations/view/create_or_replace_view.sql
2019-08-26 10:41:41,303 (MainThread): Parsing macros/etc/get_custom_alias.sql
2019-08-26 10:41:41,304 (MainThread): Parsing macros/etc/query.sql
2019-08-26 10:41:41,305 (MainThread): Parsing macros/etc/is_incremental.sql
2019-08-26 10:41:41,307 (MainThread): Parsing macros/etc/get_relation_comment.sql
2019-08-26 10:41:41,309 (MainThread): Parsing macros/etc/datetime.sql
2019-08-26 10:41:41,317 (MainThread): Parsing macros/etc/get_custom_schema.sql
2019-08-26 10:41:41,319 (MainThread): Parsing macros/adapters/common.sql
2019-08-26 10:41:41,348 (MainThread): Parsing macros/schema_tests/relationships.sql
2019-08-26 10:41:41,350 (MainThread): Parsing macros/schema_tests/not_null.sql
2019-08-26 10:41:41,351 (MainThread): Parsing macros/schema_tests/unique.sql
2019-08-26 10:41:41,352 (MainThread): Parsing macros/schema_tests/accepted_values.sql
2019-08-26 10:41:41,354 (MainThread): Parsing macros/catalog.sql
2019-08-26 10:41:41,356 (MainThread): Parsing macros/relations.sql
2019-08-26 10:41:41,357 (MainThread): Parsing macros/adapters.sql
2019-08-26 10:41:41,367 (MainThread): Parsing macros/materializations/snapshot_merge.sql
2019-08-26 10:41:41,380 (MainThread): Parsing model.towers.distance_sita2019_spgmi2018
2019-08-26 10:41:41,381 (MainThread): Acquiring new postgres connection "distance_sita2019_spgmi2018".
2019-08-26 10:41:41,381 (MainThread): Opening a new connection, currently in state init
2019-08-26 10:41:41,698 (MainThread): Parsing model.towers.distance_spgmi2018_sita2019
2019-08-26 10:41:41,699 (MainThread): Acquiring new postgres connection "distance_spgmi2018_sita2019".
2019-08-26 10:41:41,699 (MainThread): Re-using an available connection from the pool (formerly distance_sita2019_spgmi2018).
2019-08-26 10:41:41,705 (MainThread): Parsing model.towers.towers_spgmisita100m_20180701
2019-08-26 10:41:41,706 (MainThread): Acquiring new postgres connection "towers_spgmisita100m_20180701".
2019-08-26 10:41:41,706 (MainThread): Re-using an available connection from the pool (formerly distance_spgmi2018_sita2019).
2019-08-26 10:41:41,710 (MainThread): Parsing model.towers.towers_transform_sita2009_nearspgmi
2019-08-26 10:41:41,711 (MainThread): Acquiring new postgres connection "towers_transform_sita2009_nearspgmi".
2019-08-26 10:41:41,711 (MainThread): Re-using an available connection from the pool (formerly towers_spgmisita100m_20180701).
2019-08-26 10:41:41,716 (MainThread): Parsing model.towers.towers_transform_spgmi2018_buffers
2019-08-26 10:41:41,717 (MainThread): Acquiring new postgres connection "towers_transform_spgmi2018_buffers".
2019-08-26 10:41:41,717 (MainThread): Re-using an available connection from the pool (formerly towers_transform_sita2009_nearspgmi).
2019-08-26 10:41:41,723 (MainThread): Parsing model.towers.towers_transform_sita2019_buffers
2019-08-26 10:41:41,723 (MainThread): Acquiring new postgres connection "towers_transform_sita2019_buffers".
2019-08-26 10:41:41,724 (MainThread): Re-using an available connection from the pool (formerly towers_transform_spgmi2018_buffers).
2019-08-26 10:41:41,729 (MainThread): Parsing model.towers.towers_transform_sita2009_buffers
2019-08-26 10:41:41,729 (MainThread): Acquiring new postgres connection "towers_transform_sita2009_buffers".
2019-08-26 10:41:41,729 (MainThread): Re-using an available connection from the pool (formerly towers_transform_sita2019_buffers).
2019-08-26 10:41:41,734 (MainThread): Parsing model.towers.base_towers_sita_2019
2019-08-26 10:41:41,735 (MainThread): Acquiring new postgres connection "base_towers_sita_2019".
2019-08-26 10:41:41,735 (MainThread): Re-using an available connection from the pool (formerly towers_transform_sita2009_buffers).
2019-08-26 10:41:41,738 (MainThread): Parsing model.towers.base_towers_spgmi_2018
2019-08-26 10:41:41,739 (MainThread): Acquiring new postgres connection "base_towers_spgmi_2018".
2019-08-26 10:41:41,739 (MainThread): Re-using an available connection from the pool (formerly base_towers_sita_2019).
2019-08-26 10:41:41,742 (MainThread): Parsing model.towers.base_towers_sita_2009
2019-08-26 10:41:41,743 (MainThread): Acquiring new postgres connection "base_towers_sita_2009".
2019-08-26 10:41:41,743 (MainThread): Re-using an available connection from the pool (formerly base_towers_spgmi_2018).
2019-08-26 10:41:41,747 (MainThread): Parsing analysis.towers.morphology_spgmi_2018
2019-08-26 10:41:41,748 (MainThread): Acquiring new postgres connection "morphology_spgmi_2018".
2019-08-26 10:41:41,748 (MainThread): Re-using an available connection from the pool (formerly base_towers_sita_2009).
2019-08-26 10:41:41,780 (MainThread): Found 10 models, 0 tests, 0 snapshots, 1 analyses, 116 macros, 0 operations, 0 seed files, 0 sources
2019-08-26 10:41:41,781 (MainThread):
2019-08-26 10:41:41,781 (MainThread): Acquiring new postgres connection "master".
2019-08-26 10:41:41,781 (MainThread): Re-using an available connection from the pool (formerly morphology_spgmi_2018).
2019-08-26 10:41:41,792 (MainThread): Parsing macros/core.sql
2019-08-26 10:41:41,796 (MainThread): Parsing macros/materializations/helpers.sql
2019-08-26 10:41:41,802 (MainThread): Parsing macros/materializations/snapshot/snapshot_merge.sql
2019-08-26 10:41:41,804 (MainThread): Parsing macros/materializations/snapshot/strategies.sql
2019-08-26 10:41:41,815 (MainThread): Parsing macros/materializations/snapshot/snapshot.sql
2019-08-26 10:41:41,833 (MainThread): Parsing macros/materializations/seed/seed.sql
2019-08-26 10:41:41,846 (MainThread): Parsing macros/materializations/incremental/incremental.sql
2019-08-26 10:41:41,853 (MainThread): Parsing macros/materializations/common/merge.sql
2019-08-26 10:41:41,860 (MainThread): Parsing macros/materializations/table/table.sql
2019-08-26 10:41:41,865 (MainThread): Parsing macros/materializations/view/view.sql
2019-08-26 10:41:41,869 (MainThread): Parsing macros/materializations/view/create_or_replace_view.sql
2019-08-26 10:41:41,874 (MainThread): Parsing macros/etc/get_custom_alias.sql
2019-08-26 10:41:41,875 (MainThread): Parsing macros/etc/query.sql
2019-08-26 10:41:41,876 (MainThread): Parsing macros/etc/is_incremental.sql
2019-08-26 10:41:41,877 (MainThread): Parsing macros/etc/get_relation_comment.sql
2019-08-26 10:41:41,880 (MainThread): Parsing macros/etc/datetime.sql
2019-08-26 10:41:41,887 (MainThread): Parsing macros/etc/get_custom_schema.sql
2019-08-26 10:41:41,889 (MainThread): Parsing macros/adapters/common.sql
2019-08-26 10:41:41,918 (MainThread): Parsing macros/schema_tests/relationships.sql
2019-08-26 10:41:41,919 (MainThread): Parsing macros/schema_tests/not_null.sql
2019-08-26 10:41:41,920 (MainThread): Parsing macros/schema_tests/unique.sql
2019-08-26 10:41:41,921 (MainThread): Parsing macros/schema_tests/accepted_values.sql
2019-08-26 10:41:41,923 (MainThread): Parsing macros/catalog.sql
2019-08-26 10:41:41,926 (MainThread): Parsing macros/relations.sql
2019-08-26 10:41:41,927 (MainThread): Parsing macros/adapters.sql
2019-08-26 10:41:41,936 (MainThread): Parsing macros/materializations/snapshot_merge.sql
2019-08-26 10:41:42,036 (MainThread): Using postgres connection "master".
2019-08-26 10:41:42,036 (MainThread): On master:
select distinct nspname from pg_namespace
2019-08-26 10:41:42,119 (MainThread): SQL status: SELECT 370 in 0.08 seconds
2019-08-26 10:41:42,148 (MainThread): Using postgres connection "master".
2019-08-26 10:41:42,148 (MainThread): On master: BEGIN
2019-08-26 10:41:42,194 (MainThread): SQL status: BEGIN in 0.05 seconds
2019-08-26 10:41:42,195 (MainThread): Using postgres connection "master".
2019-08-26 10:41:42,195 (MainThread): On master: select
'reference' as database,
tablename as name,
schemaname as schema,
'table' as type
from pg_tables
where schemaname ilike 'tlee'
union all
select
'reference' as database,
viewname as name,
schemaname as schema,
'view' as type
from pg_views
where schemaname ilike 'tlee'
2019-08-26 10:41:42,314 (MainThread): SQL status: SELECT 261 in 0.12 seconds
2019-08-26 10:41:43,070 (MainThread): Using postgres connection "master".
2019-08-26 10:41:43,070 (MainThread): On master: --
--
with relation as (
select
pg_rewrite.ev_class as class,
pg_rewrite.oid as id
from pg_rewrite
),
class as (
select
oid as id,
relname as name,
relnamespace as schema,
relkind as kind
from pg_class
),
dependency as (
select
pg_depend.objid as id,
pg_depend.refobjid as ref
from pg_depend
),
schema as (
select
pg_namespace.oid as id,
pg_namespace.nspname as name
from pg_namespace
where nspname != 'information_schema' and nspname not like 'pg_%'
),
referenced as (
select
relation.id AS id,
referenced_class.name ,
referenced_class.schema ,
referenced_class.kind
from relation
join class as referenced_class on relation.class=referenced_class.id
where referenced_class.kind in ('r', 'v')
),
relationships as (
select
referenced.name as referenced_name,
referenced.schema as referenced_schema_id,
dependent_class.name as dependent_name,
dependent_class.schema as dependent_schema_id,
referenced.kind as kind
from referenced
join dependency on referenced.id=dependency.id
join class as dependent_class on dependency.ref=dependent_class.id
where
(referenced.name != dependent_class.name or
referenced.schema != dependent_class.schema)
)
select
referenced_schema.name as referenced_schema,
relationships.referenced_name as referenced_name,
dependent_schema.name as dependent_schema,
relationships.dependent_name as dependent_name
from relationships
join schema as dependent_schema on relationships.dependent_schema_id=dependent_schema.id
join schema as referenced_schema on relationships.referenced_schema_id=referenced_schema.id
group by referenced_schema, referenced_name, dependent_schema, dependent_name
order by referenced_schema, referenced_name, dependent_schema, dependent_name;
2019-08-26 10:41:46,706 (MainThread): SQL status: SELECT 49106 in 3.64 seconds
2019-08-26 10:42:04,957 (MainThread): On master: ROLLBACK
2019-08-26 10:42:04,995 (MainThread): Connection 'master' was left open.
2019-08-26 10:42:04,996 (MainThread): On master: Close
2019-08-26 10:42:04,997 (MainThread): Sending event: {'category': 'dbt', 'action': 'invocation', 'label': 'end', 'context': [<snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10edab110>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x1109d8910>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x1109d8cd0>]}
2019-08-26 10:42:05,196 (MainThread): Flushing usage events
2019-08-26 10:42:05,196 (MainThread): Encountered an error:
2019-08-26 10:42:05,196 (MainThread): Cache inconsistency detected: in add_link, referenced link key _ReferenceKey(database='reference', schema='tlee', identifier='cpp009_business_spend') not in cache!
2019-08-26 10:42:05,198 (MainThread): Traceback (most recent call last):
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/main.py", line 82, in main
results, succeeded = handle_and_check(args)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/main.py", line 151, in handle_and_check
task, res = run_from_args(parsed)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/main.py", line 216, in run_from_args
results = task.run()
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/task/runnable.py", line 282, in run
result = self.execute_with_hooks(selected_uids)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/task/runnable.py", line 252, in execute_with_hooks
self.before_run(adapter, selected_uids)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/task/run.py", line 150, in before_run
self.populate_adapter_cache(adapter)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/task/run.py", line 63, in populate_adapter_cache
adapter.set_relations_cache(self.manifest)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/base/impl.py", line 342, in set_relations_cache
self._relations_cache_for_schemas(manifest)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/postgres/impl.py", line 88, in _relations_cache_for_schemas
self._link_cached_relations(manifest)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/postgres/impl.py", line 84, in _link_cached_relations
self._link_cached_database_relations(schemas)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/postgres/impl.py", line 60, in _link_cached_database_relations
self.cache.add_link(dependent, referenced)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/cache.py", line 296, in add_link
self._add_link(referenced, dependent)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/cache.py", line 254, in _add_link
.format(referenced_key)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/exceptions.py", line 473, in raise_cache_inconsistent
raise InternalException('Cache inconsistency detected: {}'.format(message))
dbt.exceptions.InternalException: Cache inconsistency detected: in add_link, referenced link key _ReferenceKey(database='reference', schema='tlee', identifier='cpp009_business_spend') not in cache!
|
dbt.exceptions.InternalException
|
def _link_cached_database_relations(self, schemas):
"""
:param Set[str] schemas: The set of schemas that should have links
added.
"""
database = self.config.credentials.database
table = self.execute_macro(GET_RELATIONS_MACRO_NAME)
for dep_schema, dep_name, refed_schema, refed_name in table:
dependent = self.Relation.create(
database=database, schema=dep_schema, identifier=dep_name
)
referenced = self.Relation.create(
database=database, schema=refed_schema, identifier=refed_name
)
# don't record in cache if this relation isn't in a relevant
# schema
if refed_schema.lower() in schemas:
self.cache.add_link(referenced, dependent)
|
def _link_cached_database_relations(self, schemas):
"""
:param Set[str] schemas: The set of schemas that should have links
added.
"""
database = self.config.credentials.database
table = self.execute_macro(GET_RELATIONS_MACRO_NAME)
for refed_schema, refed_name, dep_schema, dep_name in table:
referenced = self.Relation.create(
database=database, schema=refed_schema, identifier=refed_name
)
dependent = self.Relation.create(
database=database, schema=dep_schema, identifier=dep_name
)
# don't record in cache if this relation isn't in a relevant
# schema
if refed_schema.lower() in schemas:
self.cache.add_link(dependent, referenced)
|
https://github.com/fishtown-analytics/dbt/issues/1698
|
dbt --debug run
Running with dbt=0.14.0
2019-08-26 10:41:40,953 (MainThread): Tracking: tracking
2019-08-26 10:41:40,959 (MainThread): Sending event: {'category': 'dbt', 'action': 'invocation', 'label': 'start', 'context': [<snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10eda3bd0>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10edae510>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10eda3b90>]}
2019-08-26 10:41:41,213 (MainThread): Parsing macros/core.sql
2019-08-26 10:41:41,219 (MainThread): Parsing macros/materializations/helpers.sql
2019-08-26 10:41:41,227 (MainThread): Parsing macros/materializations/snapshot/snapshot_merge.sql
2019-08-26 10:41:41,229 (MainThread): Parsing macros/materializations/snapshot/strategies.sql
2019-08-26 10:41:41,242 (MainThread): Parsing macros/materializations/snapshot/snapshot.sql
2019-08-26 10:41:41,261 (MainThread): Parsing macros/materializations/seed/seed.sql
2019-08-26 10:41:41,275 (MainThread): Parsing macros/materializations/incremental/incremental.sql
2019-08-26 10:41:41,282 (MainThread): Parsing macros/materializations/common/merge.sql
2019-08-26 10:41:41,288 (MainThread): Parsing macros/materializations/table/table.sql
2019-08-26 10:41:41,293 (MainThread): Parsing macros/materializations/view/view.sql
2019-08-26 10:41:41,298 (MainThread): Parsing macros/materializations/view/create_or_replace_view.sql
2019-08-26 10:41:41,303 (MainThread): Parsing macros/etc/get_custom_alias.sql
2019-08-26 10:41:41,304 (MainThread): Parsing macros/etc/query.sql
2019-08-26 10:41:41,305 (MainThread): Parsing macros/etc/is_incremental.sql
2019-08-26 10:41:41,307 (MainThread): Parsing macros/etc/get_relation_comment.sql
2019-08-26 10:41:41,309 (MainThread): Parsing macros/etc/datetime.sql
2019-08-26 10:41:41,317 (MainThread): Parsing macros/etc/get_custom_schema.sql
2019-08-26 10:41:41,319 (MainThread): Parsing macros/adapters/common.sql
2019-08-26 10:41:41,348 (MainThread): Parsing macros/schema_tests/relationships.sql
2019-08-26 10:41:41,350 (MainThread): Parsing macros/schema_tests/not_null.sql
2019-08-26 10:41:41,351 (MainThread): Parsing macros/schema_tests/unique.sql
2019-08-26 10:41:41,352 (MainThread): Parsing macros/schema_tests/accepted_values.sql
2019-08-26 10:41:41,354 (MainThread): Parsing macros/catalog.sql
2019-08-26 10:41:41,356 (MainThread): Parsing macros/relations.sql
2019-08-26 10:41:41,357 (MainThread): Parsing macros/adapters.sql
2019-08-26 10:41:41,367 (MainThread): Parsing macros/materializations/snapshot_merge.sql
2019-08-26 10:41:41,380 (MainThread): Parsing model.towers.distance_sita2019_spgmi2018
2019-08-26 10:41:41,381 (MainThread): Acquiring new postgres connection "distance_sita2019_spgmi2018".
2019-08-26 10:41:41,381 (MainThread): Opening a new connection, currently in state init
2019-08-26 10:41:41,698 (MainThread): Parsing model.towers.distance_spgmi2018_sita2019
2019-08-26 10:41:41,699 (MainThread): Acquiring new postgres connection "distance_spgmi2018_sita2019".
2019-08-26 10:41:41,699 (MainThread): Re-using an available connection from the pool (formerly distance_sita2019_spgmi2018).
2019-08-26 10:41:41,705 (MainThread): Parsing model.towers.towers_spgmisita100m_20180701
2019-08-26 10:41:41,706 (MainThread): Acquiring new postgres connection "towers_spgmisita100m_20180701".
2019-08-26 10:41:41,706 (MainThread): Re-using an available connection from the pool (formerly distance_spgmi2018_sita2019).
2019-08-26 10:41:41,710 (MainThread): Parsing model.towers.towers_transform_sita2009_nearspgmi
2019-08-26 10:41:41,711 (MainThread): Acquiring new postgres connection "towers_transform_sita2009_nearspgmi".
2019-08-26 10:41:41,711 (MainThread): Re-using an available connection from the pool (formerly towers_spgmisita100m_20180701).
2019-08-26 10:41:41,716 (MainThread): Parsing model.towers.towers_transform_spgmi2018_buffers
2019-08-26 10:41:41,717 (MainThread): Acquiring new postgres connection "towers_transform_spgmi2018_buffers".
2019-08-26 10:41:41,717 (MainThread): Re-using an available connection from the pool (formerly towers_transform_sita2009_nearspgmi).
2019-08-26 10:41:41,723 (MainThread): Parsing model.towers.towers_transform_sita2019_buffers
2019-08-26 10:41:41,723 (MainThread): Acquiring new postgres connection "towers_transform_sita2019_buffers".
2019-08-26 10:41:41,724 (MainThread): Re-using an available connection from the pool (formerly towers_transform_spgmi2018_buffers).
2019-08-26 10:41:41,729 (MainThread): Parsing model.towers.towers_transform_sita2009_buffers
2019-08-26 10:41:41,729 (MainThread): Acquiring new postgres connection "towers_transform_sita2009_buffers".
2019-08-26 10:41:41,729 (MainThread): Re-using an available connection from the pool (formerly towers_transform_sita2019_buffers).
2019-08-26 10:41:41,734 (MainThread): Parsing model.towers.base_towers_sita_2019
2019-08-26 10:41:41,735 (MainThread): Acquiring new postgres connection "base_towers_sita_2019".
2019-08-26 10:41:41,735 (MainThread): Re-using an available connection from the pool (formerly towers_transform_sita2009_buffers).
2019-08-26 10:41:41,738 (MainThread): Parsing model.towers.base_towers_spgmi_2018
2019-08-26 10:41:41,739 (MainThread): Acquiring new postgres connection "base_towers_spgmi_2018".
2019-08-26 10:41:41,739 (MainThread): Re-using an available connection from the pool (formerly base_towers_sita_2019).
2019-08-26 10:41:41,742 (MainThread): Parsing model.towers.base_towers_sita_2009
2019-08-26 10:41:41,743 (MainThread): Acquiring new postgres connection "base_towers_sita_2009".
2019-08-26 10:41:41,743 (MainThread): Re-using an available connection from the pool (formerly base_towers_spgmi_2018).
2019-08-26 10:41:41,747 (MainThread): Parsing analysis.towers.morphology_spgmi_2018
2019-08-26 10:41:41,748 (MainThread): Acquiring new postgres connection "morphology_spgmi_2018".
2019-08-26 10:41:41,748 (MainThread): Re-using an available connection from the pool (formerly base_towers_sita_2009).
2019-08-26 10:41:41,780 (MainThread): Found 10 models, 0 tests, 0 snapshots, 1 analyses, 116 macros, 0 operations, 0 seed files, 0 sources
2019-08-26 10:41:41,781 (MainThread):
2019-08-26 10:41:41,781 (MainThread): Acquiring new postgres connection "master".
2019-08-26 10:41:41,781 (MainThread): Re-using an available connection from the pool (formerly morphology_spgmi_2018).
2019-08-26 10:41:41,792 (MainThread): Parsing macros/core.sql
2019-08-26 10:41:41,796 (MainThread): Parsing macros/materializations/helpers.sql
2019-08-26 10:41:41,802 (MainThread): Parsing macros/materializations/snapshot/snapshot_merge.sql
2019-08-26 10:41:41,804 (MainThread): Parsing macros/materializations/snapshot/strategies.sql
2019-08-26 10:41:41,815 (MainThread): Parsing macros/materializations/snapshot/snapshot.sql
2019-08-26 10:41:41,833 (MainThread): Parsing macros/materializations/seed/seed.sql
2019-08-26 10:41:41,846 (MainThread): Parsing macros/materializations/incremental/incremental.sql
2019-08-26 10:41:41,853 (MainThread): Parsing macros/materializations/common/merge.sql
2019-08-26 10:41:41,860 (MainThread): Parsing macros/materializations/table/table.sql
2019-08-26 10:41:41,865 (MainThread): Parsing macros/materializations/view/view.sql
2019-08-26 10:41:41,869 (MainThread): Parsing macros/materializations/view/create_or_replace_view.sql
2019-08-26 10:41:41,874 (MainThread): Parsing macros/etc/get_custom_alias.sql
2019-08-26 10:41:41,875 (MainThread): Parsing macros/etc/query.sql
2019-08-26 10:41:41,876 (MainThread): Parsing macros/etc/is_incremental.sql
2019-08-26 10:41:41,877 (MainThread): Parsing macros/etc/get_relation_comment.sql
2019-08-26 10:41:41,880 (MainThread): Parsing macros/etc/datetime.sql
2019-08-26 10:41:41,887 (MainThread): Parsing macros/etc/get_custom_schema.sql
2019-08-26 10:41:41,889 (MainThread): Parsing macros/adapters/common.sql
2019-08-26 10:41:41,918 (MainThread): Parsing macros/schema_tests/relationships.sql
2019-08-26 10:41:41,919 (MainThread): Parsing macros/schema_tests/not_null.sql
2019-08-26 10:41:41,920 (MainThread): Parsing macros/schema_tests/unique.sql
2019-08-26 10:41:41,921 (MainThread): Parsing macros/schema_tests/accepted_values.sql
2019-08-26 10:41:41,923 (MainThread): Parsing macros/catalog.sql
2019-08-26 10:41:41,926 (MainThread): Parsing macros/relations.sql
2019-08-26 10:41:41,927 (MainThread): Parsing macros/adapters.sql
2019-08-26 10:41:41,936 (MainThread): Parsing macros/materializations/snapshot_merge.sql
2019-08-26 10:41:42,036 (MainThread): Using postgres connection "master".
2019-08-26 10:41:42,036 (MainThread): On master:
select distinct nspname from pg_namespace
2019-08-26 10:41:42,119 (MainThread): SQL status: SELECT 370 in 0.08 seconds
2019-08-26 10:41:42,148 (MainThread): Using postgres connection "master".
2019-08-26 10:41:42,148 (MainThread): On master: BEGIN
2019-08-26 10:41:42,194 (MainThread): SQL status: BEGIN in 0.05 seconds
2019-08-26 10:41:42,195 (MainThread): Using postgres connection "master".
2019-08-26 10:41:42,195 (MainThread): On master: select
'reference' as database,
tablename as name,
schemaname as schema,
'table' as type
from pg_tables
where schemaname ilike 'tlee'
union all
select
'reference' as database,
viewname as name,
schemaname as schema,
'view' as type
from pg_views
where schemaname ilike 'tlee'
2019-08-26 10:41:42,314 (MainThread): SQL status: SELECT 261 in 0.12 seconds
2019-08-26 10:41:43,070 (MainThread): Using postgres connection "master".
2019-08-26 10:41:43,070 (MainThread): On master: --
--
with relation as (
select
pg_rewrite.ev_class as class,
pg_rewrite.oid as id
from pg_rewrite
),
class as (
select
oid as id,
relname as name,
relnamespace as schema,
relkind as kind
from pg_class
),
dependency as (
select
pg_depend.objid as id,
pg_depend.refobjid as ref
from pg_depend
),
schema as (
select
pg_namespace.oid as id,
pg_namespace.nspname as name
from pg_namespace
where nspname != 'information_schema' and nspname not like 'pg_%'
),
referenced as (
select
relation.id AS id,
referenced_class.name ,
referenced_class.schema ,
referenced_class.kind
from relation
join class as referenced_class on relation.class=referenced_class.id
where referenced_class.kind in ('r', 'v')
),
relationships as (
select
referenced.name as referenced_name,
referenced.schema as referenced_schema_id,
dependent_class.name as dependent_name,
dependent_class.schema as dependent_schema_id,
referenced.kind as kind
from referenced
join dependency on referenced.id=dependency.id
join class as dependent_class on dependency.ref=dependent_class.id
where
(referenced.name != dependent_class.name or
referenced.schema != dependent_class.schema)
)
select
referenced_schema.name as referenced_schema,
relationships.referenced_name as referenced_name,
dependent_schema.name as dependent_schema,
relationships.dependent_name as dependent_name
from relationships
join schema as dependent_schema on relationships.dependent_schema_id=dependent_schema.id
join schema as referenced_schema on relationships.referenced_schema_id=referenced_schema.id
group by referenced_schema, referenced_name, dependent_schema, dependent_name
order by referenced_schema, referenced_name, dependent_schema, dependent_name;
2019-08-26 10:41:46,706 (MainThread): SQL status: SELECT 49106 in 3.64 seconds
2019-08-26 10:42:04,957 (MainThread): On master: ROLLBACK
2019-08-26 10:42:04,995 (MainThread): Connection 'master' was left open.
2019-08-26 10:42:04,996 (MainThread): On master: Close
2019-08-26 10:42:04,997 (MainThread): Sending event: {'category': 'dbt', 'action': 'invocation', 'label': 'end', 'context': [<snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10edab110>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x1109d8910>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x1109d8cd0>]}
2019-08-26 10:42:05,196 (MainThread): Flushing usage events
2019-08-26 10:42:05,196 (MainThread): Encountered an error:
2019-08-26 10:42:05,196 (MainThread): Cache inconsistency detected: in add_link, referenced link key _ReferenceKey(database='reference', schema='tlee', identifier='cpp009_business_spend') not in cache!
2019-08-26 10:42:05,198 (MainThread): Traceback (most recent call last):
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/main.py", line 82, in main
results, succeeded = handle_and_check(args)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/main.py", line 151, in handle_and_check
task, res = run_from_args(parsed)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/main.py", line 216, in run_from_args
results = task.run()
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/task/runnable.py", line 282, in run
result = self.execute_with_hooks(selected_uids)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/task/runnable.py", line 252, in execute_with_hooks
self.before_run(adapter, selected_uids)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/task/run.py", line 150, in before_run
self.populate_adapter_cache(adapter)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/task/run.py", line 63, in populate_adapter_cache
adapter.set_relations_cache(self.manifest)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/base/impl.py", line 342, in set_relations_cache
self._relations_cache_for_schemas(manifest)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/postgres/impl.py", line 88, in _relations_cache_for_schemas
self._link_cached_relations(manifest)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/postgres/impl.py", line 84, in _link_cached_relations
self._link_cached_database_relations(schemas)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/postgres/impl.py", line 60, in _link_cached_database_relations
self.cache.add_link(dependent, referenced)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/cache.py", line 296, in add_link
self._add_link(referenced, dependent)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/adapters/cache.py", line 254, in _add_link
.format(referenced_key)
File "/usr/local/Cellar/dbt@0.14.0/0.14.0/libexec/lib/python3.7/site-packages/dbt/exceptions.py", line 473, in raise_cache_inconsistent
raise InternalException('Cache inconsistency detected: {}'.format(message))
dbt.exceptions.InternalException: Cache inconsistency detected: in add_link, referenced link key _ReferenceKey(database='reference', schema='tlee', identifier='cpp009_business_spend') not in cache!
|
dbt.exceptions.InternalException
|
def find_matching(root_path, relative_paths_to_search, file_pattern):
"""
Given an absolute `root_path`, a list of relative paths to that
absolute root path (`relative_paths_to_search`), and a `file_pattern`
like '*.sql', returns information about the files. For example:
> find_matching('/root/path', ['models'], '*.sql')
[ { 'absolute_path': '/root/path/models/model_one.sql',
'relative_path': 'model_one.sql',
'searched_path': 'models' },
{ 'absolute_path': '/root/path/models/subdirectory/model_two.sql',
'relative_path': 'subdirectory/model_two.sql',
'searched_path': 'models' } ]
"""
matching = []
root_path = os.path.normpath(root_path)
for relative_path_to_search in relative_paths_to_search:
absolute_path_to_search = os.path.join(root_path, relative_path_to_search)
walk_results = os.walk(absolute_path_to_search)
for current_path, subdirectories, local_files in walk_results:
for local_file in local_files:
absolute_path = os.path.join(current_path, local_file)
relative_path = os.path.relpath(absolute_path, absolute_path_to_search)
if fnmatch.fnmatch(local_file, file_pattern):
matching.append(
{
"searched_path": os.path.normcase(relative_path_to_search),
"absolute_path": os.path.normcase(absolute_path),
"relative_path": os.path.normcase(relative_path),
}
)
return matching
|
def find_matching(root_path, relative_paths_to_search, file_pattern):
"""
Given an absolute `root_path`, a list of relative paths to that
absolute root path (`relative_paths_to_search`), and a `file_pattern`
like '*.sql', returns information about the files. For example:
> find_matching('/root/path', ['models'], '*.sql')
[ { 'absolute_path': '/root/path/models/model_one.sql',
'relative_path': 'model_one.sql',
'searched_path': 'models' },
{ 'absolute_path': '/root/path/models/subdirectory/model_two.sql',
'relative_path': 'subdirectory/model_two.sql',
'searched_path': 'models' } ]
"""
matching = []
root_path = os.path.normpath(root_path)
for relative_path_to_search in relative_paths_to_search:
absolute_path_to_search = os.path.join(root_path, relative_path_to_search)
walk_results = os.walk(absolute_path_to_search)
for current_path, subdirectories, local_files in walk_results:
for local_file in local_files:
absolute_path = os.path.join(current_path, local_file)
relative_path = os.path.relpath(absolute_path, absolute_path_to_search)
if fnmatch.fnmatch(local_file, file_pattern):
matching.append(
{
"searched_path": relative_path_to_search,
"absolute_path": absolute_path,
"relative_path": relative_path,
}
)
return matching
|
https://github.com/fishtown-analytics/dbt/issues/1723
|
2019-09-04 16:36:25,854704 (MainThread): Acquiring new postgres connection "seedtable".
2019-09-04 16:36:25,854894 (MainThread): Re-using an available connection from the pool (formerly model_2).
2019-09-04 16:36:25,963811 (MainThread): Unhandled error while executing seed.minimal.seedtable
[Errno 2] No such file or directory: 'data/seedtable.csv'
2019-09-04 16:36:25,964249 (MainThread):
Traceback (most recent call last):
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 203, in safe_run
result = self.compile_and_execute(manifest, ctx)
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 146, in compile_and_execute
result = self.run(ctx.node, manifest)
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 245, in run
return self.execute(compiled_node, manifest)
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 357, in execute
materialization_macro.generator(context)()
File "/Users/jake/src/fishtown/dbt/core/dbt/clients/jinja.py", line 127, in call
return macro(*args, **kwargs)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/runtime.py", line 575, in __call__
return self._invoke(arguments, autoescape)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/asyncsupport.py", line 110, in _invoke
return original_invoke(self, arguments, autoescape)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/runtime.py", line 579, in _invoke
rv = self._func(*arguments)
File "<template>", line 346, in macro
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/sandbox.py", line 438, in call
return __context.call(__obj, *args, **kwargs)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/runtime.py", line 262, in call
return __obj(*args, **kwargs)
File "/Users/jake/src/fishtown/dbt/core/dbt/context/common.py", line 364, in load_agate_table
table = dbt.clients.agate_helper.from_csv(path)
File "/Users/jake/src/fishtown/dbt/core/dbt/clients/agate_helper.py", line 65, in from_csv
with open(abspath, encoding='utf-8') as fp:
FileNotFoundError: [Errno 2] No such file or directory: 'data/seedtable.csv'
|
FileNotFoundError
|
def _build_load_agate_table(
model: Union[ParsedSeedNode, CompiledSeedNode],
) -> Callable[[], agate.Table]:
def load_agate_table():
path = model.seed_file_path
try:
table = dbt.clients.agate_helper.from_csv(path)
except ValueError as e:
dbt.exceptions.raise_compiler_error(str(e))
table.original_abspath = os.path.abspath(path)
return table
return load_agate_table
|
def _build_load_agate_table(model):
def load_agate_table():
path = model.original_file_path
try:
table = dbt.clients.agate_helper.from_csv(path)
except ValueError as e:
dbt.exceptions.raise_compiler_error(str(e))
table.original_abspath = os.path.abspath(path)
return table
return load_agate_table
|
https://github.com/fishtown-analytics/dbt/issues/1723
|
2019-09-04 16:36:25,854704 (MainThread): Acquiring new postgres connection "seedtable".
2019-09-04 16:36:25,854894 (MainThread): Re-using an available connection from the pool (formerly model_2).
2019-09-04 16:36:25,963811 (MainThread): Unhandled error while executing seed.minimal.seedtable
[Errno 2] No such file or directory: 'data/seedtable.csv'
2019-09-04 16:36:25,964249 (MainThread):
Traceback (most recent call last):
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 203, in safe_run
result = self.compile_and_execute(manifest, ctx)
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 146, in compile_and_execute
result = self.run(ctx.node, manifest)
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 245, in run
return self.execute(compiled_node, manifest)
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 357, in execute
materialization_macro.generator(context)()
File "/Users/jake/src/fishtown/dbt/core/dbt/clients/jinja.py", line 127, in call
return macro(*args, **kwargs)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/runtime.py", line 575, in __call__
return self._invoke(arguments, autoescape)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/asyncsupport.py", line 110, in _invoke
return original_invoke(self, arguments, autoescape)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/runtime.py", line 579, in _invoke
rv = self._func(*arguments)
File "<template>", line 346, in macro
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/sandbox.py", line 438, in call
return __context.call(__obj, *args, **kwargs)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/runtime.py", line 262, in call
return __obj(*args, **kwargs)
File "/Users/jake/src/fishtown/dbt/core/dbt/context/common.py", line 364, in load_agate_table
table = dbt.clients.agate_helper.from_csv(path)
File "/Users/jake/src/fishtown/dbt/core/dbt/clients/agate_helper.py", line 65, in from_csv
with open(abspath, encoding='utf-8') as fp:
FileNotFoundError: [Errno 2] No such file or directory: 'data/seedtable.csv'
|
FileNotFoundError
|
def load_agate_table():
path = model.seed_file_path
try:
table = dbt.clients.agate_helper.from_csv(path)
except ValueError as e:
dbt.exceptions.raise_compiler_error(str(e))
table.original_abspath = os.path.abspath(path)
return table
|
def load_agate_table():
path = model.original_file_path
try:
table = dbt.clients.agate_helper.from_csv(path)
except ValueError as e:
dbt.exceptions.raise_compiler_error(str(e))
table.original_abspath = os.path.abspath(path)
return table
|
https://github.com/fishtown-analytics/dbt/issues/1723
|
2019-09-04 16:36:25,854704 (MainThread): Acquiring new postgres connection "seedtable".
2019-09-04 16:36:25,854894 (MainThread): Re-using an available connection from the pool (formerly model_2).
2019-09-04 16:36:25,963811 (MainThread): Unhandled error while executing seed.minimal.seedtable
[Errno 2] No such file or directory: 'data/seedtable.csv'
2019-09-04 16:36:25,964249 (MainThread):
Traceback (most recent call last):
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 203, in safe_run
result = self.compile_and_execute(manifest, ctx)
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 146, in compile_and_execute
result = self.run(ctx.node, manifest)
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 245, in run
return self.execute(compiled_node, manifest)
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 357, in execute
materialization_macro.generator(context)()
File "/Users/jake/src/fishtown/dbt/core/dbt/clients/jinja.py", line 127, in call
return macro(*args, **kwargs)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/runtime.py", line 575, in __call__
return self._invoke(arguments, autoescape)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/asyncsupport.py", line 110, in _invoke
return original_invoke(self, arguments, autoescape)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/runtime.py", line 579, in _invoke
rv = self._func(*arguments)
File "<template>", line 346, in macro
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/sandbox.py", line 438, in call
return __context.call(__obj, *args, **kwargs)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/runtime.py", line 262, in call
return __obj(*args, **kwargs)
File "/Users/jake/src/fishtown/dbt/core/dbt/context/common.py", line 364, in load_agate_table
table = dbt.clients.agate_helper.from_csv(path)
File "/Users/jake/src/fishtown/dbt/core/dbt/clients/agate_helper.py", line 65, in from_csv
with open(abspath, encoding='utf-8') as fp:
FileNotFoundError: [Errno 2] No such file or directory: 'data/seedtable.csv'
|
FileNotFoundError
|
def parsed_instance_for(compiled: CompiledNode) -> ParsedNode:
cls = PARSED_TYPES.get(compiled.resource_type)
if cls is None:
# how???
raise ValueError("invalid resource_type: {}".format(compiled.resource_type))
# validate=False to allow extra keys from compiling
return cls.from_dict(compiled.to_dict(), validate=False)
|
def parsed_instance_for(compiled: CompiledNode) -> ParsedNode:
cls = PARSED_TYPES.get(compiled.resource_type)
if cls is None:
# how???
raise ValueError("invalid resource_type: {}".format(compiled.resource_type))
# validate=False to allow extra keys from copmiling
return cls.from_dict(compiled.to_dict(), validate=False)
|
https://github.com/fishtown-analytics/dbt/issues/1723
|
2019-09-04 16:36:25,854704 (MainThread): Acquiring new postgres connection "seedtable".
2019-09-04 16:36:25,854894 (MainThread): Re-using an available connection from the pool (formerly model_2).
2019-09-04 16:36:25,963811 (MainThread): Unhandled error while executing seed.minimal.seedtable
[Errno 2] No such file or directory: 'data/seedtable.csv'
2019-09-04 16:36:25,964249 (MainThread):
Traceback (most recent call last):
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 203, in safe_run
result = self.compile_and_execute(manifest, ctx)
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 146, in compile_and_execute
result = self.run(ctx.node, manifest)
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 245, in run
return self.execute(compiled_node, manifest)
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 357, in execute
materialization_macro.generator(context)()
File "/Users/jake/src/fishtown/dbt/core/dbt/clients/jinja.py", line 127, in call
return macro(*args, **kwargs)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/runtime.py", line 575, in __call__
return self._invoke(arguments, autoescape)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/asyncsupport.py", line 110, in _invoke
return original_invoke(self, arguments, autoescape)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/runtime.py", line 579, in _invoke
rv = self._func(*arguments)
File "<template>", line 346, in macro
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/sandbox.py", line 438, in call
return __context.call(__obj, *args, **kwargs)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/runtime.py", line 262, in call
return __obj(*args, **kwargs)
File "/Users/jake/src/fishtown/dbt/core/dbt/context/common.py", line 364, in load_agate_table
table = dbt.clients.agate_helper.from_csv(path)
File "/Users/jake/src/fishtown/dbt/core/dbt/clients/agate_helper.py", line 65, in from_csv
with open(abspath, encoding='utf-8') as fp:
FileNotFoundError: [Errno 2] No such file or directory: 'data/seedtable.csv'
|
FileNotFoundError
|
def search_key(self) -> str:
# TODO: should this be project name + path relative to project root?
return self.absolute_path
|
def search_key(self):
# TODO: should this be project root + original_file_path?
return self.absolute_path
|
https://github.com/fishtown-analytics/dbt/issues/1723
|
2019-09-04 16:36:25,854704 (MainThread): Acquiring new postgres connection "seedtable".
2019-09-04 16:36:25,854894 (MainThread): Re-using an available connection from the pool (formerly model_2).
2019-09-04 16:36:25,963811 (MainThread): Unhandled error while executing seed.minimal.seedtable
[Errno 2] No such file or directory: 'data/seedtable.csv'
2019-09-04 16:36:25,964249 (MainThread):
Traceback (most recent call last):
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 203, in safe_run
result = self.compile_and_execute(manifest, ctx)
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 146, in compile_and_execute
result = self.run(ctx.node, manifest)
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 245, in run
return self.execute(compiled_node, manifest)
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 357, in execute
materialization_macro.generator(context)()
File "/Users/jake/src/fishtown/dbt/core/dbt/clients/jinja.py", line 127, in call
return macro(*args, **kwargs)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/runtime.py", line 575, in __call__
return self._invoke(arguments, autoescape)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/asyncsupport.py", line 110, in _invoke
return original_invoke(self, arguments, autoescape)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/runtime.py", line 579, in _invoke
rv = self._func(*arguments)
File "<template>", line 346, in macro
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/sandbox.py", line 438, in call
return __context.call(__obj, *args, **kwargs)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/runtime.py", line 262, in call
return __obj(*args, **kwargs)
File "/Users/jake/src/fishtown/dbt/core/dbt/context/common.py", line 364, in load_agate_table
table = dbt.clients.agate_helper.from_csv(path)
File "/Users/jake/src/fishtown/dbt/core/dbt/clients/agate_helper.py", line 65, in from_csv
with open(abspath, encoding='utf-8') as fp:
FileNotFoundError: [Errno 2] No such file or directory: 'data/seedtable.csv'
|
FileNotFoundError
|
def original_file_path(self) -> str:
# this is mostly used for reporting errors. It doesn't show the project
# name, should it?
return os.path.join(self.searched_path, self.relative_path)
|
def original_file_path(self):
return os.path.join(self.searched_path, self.relative_path)
|
https://github.com/fishtown-analytics/dbt/issues/1723
|
2019-09-04 16:36:25,854704 (MainThread): Acquiring new postgres connection "seedtable".
2019-09-04 16:36:25,854894 (MainThread): Re-using an available connection from the pool (formerly model_2).
2019-09-04 16:36:25,963811 (MainThread): Unhandled error while executing seed.minimal.seedtable
[Errno 2] No such file or directory: 'data/seedtable.csv'
2019-09-04 16:36:25,964249 (MainThread):
Traceback (most recent call last):
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 203, in safe_run
result = self.compile_and_execute(manifest, ctx)
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 146, in compile_and_execute
result = self.run(ctx.node, manifest)
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 245, in run
return self.execute(compiled_node, manifest)
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 357, in execute
materialization_macro.generator(context)()
File "/Users/jake/src/fishtown/dbt/core/dbt/clients/jinja.py", line 127, in call
return macro(*args, **kwargs)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/runtime.py", line 575, in __call__
return self._invoke(arguments, autoescape)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/asyncsupport.py", line 110, in _invoke
return original_invoke(self, arguments, autoescape)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/runtime.py", line 579, in _invoke
rv = self._func(*arguments)
File "<template>", line 346, in macro
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/sandbox.py", line 438, in call
return __context.call(__obj, *args, **kwargs)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/runtime.py", line 262, in call
return __obj(*args, **kwargs)
File "/Users/jake/src/fishtown/dbt/core/dbt/context/common.py", line 364, in load_agate_table
table = dbt.clients.agate_helper.from_csv(path)
File "/Users/jake/src/fishtown/dbt/core/dbt/clients/agate_helper.py", line 65, in from_csv
with open(abspath, encoding='utf-8') as fp:
FileNotFoundError: [Errno 2] No such file or directory: 'data/seedtable.csv'
|
FileNotFoundError
|
def parse_args(args):
p = DBTArgumentParser(
prog="dbt",
description="""
An ELT tool for managing your SQL transformations and data models.
For more documentation on these commands, visit: docs.getdbt.com
""",
epilog="""
Specify one of these sub-commands and you can find more help from
there.
""",
)
p.add_argument(
"--version",
action="dbtversion",
help="""
Show version information
""",
)
p.add_argument(
"-r",
"--record-timing-info",
default=None,
type=str,
help="""
When this option is passed, dbt will output low-level timing stats to
the specified file. Example: `--record-timing-info output.profile`
""",
)
p.add_argument(
"-d",
"--debug",
action="store_true",
help="""
Display debug logging during dbt execution. Useful for debugging and
making bug reports.
""",
)
p.add_argument(
"--no-write-json",
action="store_false",
dest="write_json",
help="""
If set, skip writing the manifest and run_results.json files to disk
""",
)
p.add_argument(
"-S",
"--strict",
action="store_true",
help="""
Run schema validations at runtime. This will surface bugs in dbt, but
may incur a performance penalty.
""",
)
p.add_argument(
"--warn-error",
action="store_true",
help="""
If dbt would normally warn, instead raise an exception. Examples
include --models that selects nothing, deprecations, configurations
with no associated models, invalid test configurations, and missing
sources/refs in tests.
""",
)
p.add_argument(
"--partial-parse",
action="store_true",
help="""
Allow for partial parsing by looking for and writing to a pickle file
in the target directory.
WARNING: This can result in unexpected behavior if you use env_var()!
""",
)
# if set, run dbt in single-threaded mode: thread count is ignored, and
# calls go through `map` instead of the thread pool. This is useful for
# getting performance information about aspects of dbt that normally run in
# a thread, as the profiler ignores child threads. Users should really
# never use this.
p.add_argument(
"--single-threaded",
action="store_true",
help=argparse.SUPPRESS,
)
# if set, extract all models and blocks with the jinja block extractor, and
# verify that we don't fail anywhere the actual jinja parser passes. The
# reverse (passing files that ends up failing jinja) is fine.
p.add_argument("--test-new-parser", action="store_true", help=argparse.SUPPRESS)
subs = p.add_subparsers(title="Available sub-commands")
base_subparser = _build_base_subparser()
# make the subcommands that have their own subcommands
docs_sub = _build_docs_subparser(subs, base_subparser)
docs_subs = docs_sub.add_subparsers(title="Available sub-commands")
source_sub = _build_source_subparser(subs, base_subparser)
source_subs = source_sub.add_subparsers(title="Available sub-commands")
_build_init_subparser(subs, base_subparser)
_build_clean_subparser(subs, base_subparser)
_build_debug_subparser(subs, base_subparser)
_build_deps_subparser(subs, base_subparser)
_build_list_subparser(subs, base_subparser)
snapshot_sub = _build_snapshot_subparser(subs, base_subparser)
rpc_sub = _build_rpc_subparser(subs, base_subparser)
run_sub = _build_run_subparser(subs, base_subparser)
compile_sub = _build_compile_subparser(subs, base_subparser)
generate_sub = _build_docs_generate_subparser(docs_subs, base_subparser)
test_sub = _build_test_subparser(subs, base_subparser)
seed_sub = _build_seed_subparser(subs, base_subparser)
# --threads, --no-version-check
_add_common_arguments(
run_sub, compile_sub, generate_sub, test_sub, rpc_sub, seed_sub
)
# --models, --exclude
_add_selection_arguments(run_sub, compile_sub, generate_sub, test_sub)
_add_selection_arguments(snapshot_sub, models_name="select")
# --full-refresh
_add_table_mutability_arguments(run_sub, compile_sub)
_build_docs_serve_subparser(docs_subs, base_subparser)
_build_source_snapshot_freshness_subparser(source_subs, base_subparser)
_build_run_operation_subparser(subs, base_subparser)
if len(args) == 0:
p.print_help()
sys.exit(1)
parsed = p.parse_args(args)
parsed.profiles_dir = os.path.expanduser(parsed.profiles_dir)
if not hasattr(parsed, "which"):
# the user did not provide a valid subcommand. trigger the help message
# and exit with a error
p.print_help()
p.exit(1)
return parsed
|
def parse_args(args):
p = DBTArgumentParser(
prog="dbt",
description="""
An ELT tool for managing your SQL transformations and data models.
For more documentation on these commands, visit: docs.getdbt.com
""",
epilog="""
Specify one of these sub-commands and you can find more help from
there.
""",
)
p.add_argument(
"--version",
action="dbtversion",
help="""
Show version information
""",
)
p.add_argument(
"-r",
"--record-timing-info",
default=None,
type=str,
help="""
When this option is passed, dbt will output low-level timing stats to
the specified file. Example: `--record-timing-info output.profile`
""",
)
p.add_argument(
"-d",
"--debug",
action="store_true",
help="""
Display debug logging during dbt execution. Useful for debugging and
making bug reports.
""",
)
p.add_argument(
"--no-write-json",
action="store_false",
dest="write_json",
help="""
If set, skip writing the manifest and run_results.json files to disk
""",
)
p.add_argument(
"-S",
"--strict",
action="store_true",
help="""
Run schema validations at runtime. This will surface bugs in dbt, but
may incur a performance penalty.
""",
)
p.add_argument(
"--warn-error",
action="store_true",
help="""
If dbt would normally warn, instead raise an exception. Examples
include --models that selects nothing, deprecations, configurations
with no associated models, invalid test configurations, and missing
sources/refs in tests.
""",
)
p.add_argument(
"--partial-parse",
action="store_true",
help="""
Allow for partial parsing by looking for and writing to a pickle file
in the target directory.
WARNING: This can result in unexpected behavior if you use env_var()!
""",
)
# if set, run dbt in single-threaded mode: thread count is ignored, and
# calls go through `map` instead of the thread pool. This is useful for
# getting performance information about aspects of dbt that normally run in
# a thread, as the profiler ignores child threads. Users should really
# never use this.
p.add_argument(
"--single-threaded",
action="store_true",
help=argparse.SUPPRESS,
)
# if set, extract all models and blocks with the jinja block extractor, and
# verify that we don't fail anywhere the actual jinja parser passes. The
# reverse (passing files that ends up failing jinja) is fine.
p.add_argument("--test-new-parser", action="store_true", help=argparse.SUPPRESS)
subs = p.add_subparsers(title="Available sub-commands")
base_subparser = _build_base_subparser()
# make the subcommands that have their own subcommands
docs_sub = _build_docs_subparser(subs, base_subparser)
docs_subs = docs_sub.add_subparsers(title="Available sub-commands")
source_sub = _build_source_subparser(subs, base_subparser)
source_subs = source_sub.add_subparsers(title="Available sub-commands")
_build_init_subparser(subs, base_subparser)
_build_clean_subparser(subs, base_subparser)
_build_debug_subparser(subs, base_subparser)
_build_deps_subparser(subs, base_subparser)
_build_list_subparser(subs, base_subparser)
snapshot_sub = _build_snapshot_subparser(subs, base_subparser)
rpc_sub = _build_rpc_subparser(subs, base_subparser)
run_sub = _build_run_subparser(subs, base_subparser)
compile_sub = _build_compile_subparser(subs, base_subparser)
generate_sub = _build_docs_generate_subparser(docs_subs, base_subparser)
test_sub = _build_test_subparser(subs, base_subparser)
# --threads, --no-version-check
_add_common_arguments(run_sub, compile_sub, generate_sub, test_sub, rpc_sub)
# --models, --exclude
_add_selection_arguments(run_sub, compile_sub, generate_sub, test_sub)
_add_selection_arguments(snapshot_sub, models_name="select")
# --full-refresh
_add_table_mutability_arguments(run_sub, compile_sub)
_build_seed_subparser(subs, base_subparser)
_build_docs_serve_subparser(docs_subs, base_subparser)
_build_source_snapshot_freshness_subparser(source_subs, base_subparser)
_build_run_operation_subparser(subs, base_subparser)
if len(args) == 0:
p.print_help()
sys.exit(1)
parsed = p.parse_args(args)
parsed.profiles_dir = os.path.expanduser(parsed.profiles_dir)
if not hasattr(parsed, "which"):
# the user did not provide a valid subcommand. trigger the help message
# and exit with a error
p.print_help()
p.exit(1)
return parsed
|
https://github.com/fishtown-analytics/dbt/issues/1723
|
2019-09-04 16:36:25,854704 (MainThread): Acquiring new postgres connection "seedtable".
2019-09-04 16:36:25,854894 (MainThread): Re-using an available connection from the pool (formerly model_2).
2019-09-04 16:36:25,963811 (MainThread): Unhandled error while executing seed.minimal.seedtable
[Errno 2] No such file or directory: 'data/seedtable.csv'
2019-09-04 16:36:25,964249 (MainThread):
Traceback (most recent call last):
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 203, in safe_run
result = self.compile_and_execute(manifest, ctx)
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 146, in compile_and_execute
result = self.run(ctx.node, manifest)
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 245, in run
return self.execute(compiled_node, manifest)
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 357, in execute
materialization_macro.generator(context)()
File "/Users/jake/src/fishtown/dbt/core/dbt/clients/jinja.py", line 127, in call
return macro(*args, **kwargs)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/runtime.py", line 575, in __call__
return self._invoke(arguments, autoescape)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/asyncsupport.py", line 110, in _invoke
return original_invoke(self, arguments, autoescape)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/runtime.py", line 579, in _invoke
rv = self._func(*arguments)
File "<template>", line 346, in macro
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/sandbox.py", line 438, in call
return __context.call(__obj, *args, **kwargs)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/runtime.py", line 262, in call
return __obj(*args, **kwargs)
File "/Users/jake/src/fishtown/dbt/core/dbt/context/common.py", line 364, in load_agate_table
table = dbt.clients.agate_helper.from_csv(path)
File "/Users/jake/src/fishtown/dbt/core/dbt/clients/agate_helper.py", line 65, in from_csv
with open(abspath, encoding='utf-8') as fp:
FileNotFoundError: [Errno 2] No such file or directory: 'data/seedtable.csv'
|
FileNotFoundError
|
def get_paths(self) -> List[FilePath]:
path = FilePath(
project_root=self.project.project_root,
searched_path=".",
relative_path="dbt_project.yml",
)
return [path]
|
def get_paths(self):
searched_path = "."
relative_path = "dbt_project.yml"
absolute_path = os.path.normcase(
os.path.abspath(
os.path.join(self.project.project_root, searched_path, relative_path)
)
)
path = FilePath(
searched_path=".",
relative_path="relative_path",
absolute_path=absolute_path,
)
return [path]
|
https://github.com/fishtown-analytics/dbt/issues/1723
|
2019-09-04 16:36:25,854704 (MainThread): Acquiring new postgres connection "seedtable".
2019-09-04 16:36:25,854894 (MainThread): Re-using an available connection from the pool (formerly model_2).
2019-09-04 16:36:25,963811 (MainThread): Unhandled error while executing seed.minimal.seedtable
[Errno 2] No such file or directory: 'data/seedtable.csv'
2019-09-04 16:36:25,964249 (MainThread):
Traceback (most recent call last):
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 203, in safe_run
result = self.compile_and_execute(manifest, ctx)
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 146, in compile_and_execute
result = self.run(ctx.node, manifest)
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 245, in run
return self.execute(compiled_node, manifest)
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 357, in execute
materialization_macro.generator(context)()
File "/Users/jake/src/fishtown/dbt/core/dbt/clients/jinja.py", line 127, in call
return macro(*args, **kwargs)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/runtime.py", line 575, in __call__
return self._invoke(arguments, autoescape)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/asyncsupport.py", line 110, in _invoke
return original_invoke(self, arguments, autoescape)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/runtime.py", line 579, in _invoke
rv = self._func(*arguments)
File "<template>", line 346, in macro
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/sandbox.py", line 438, in call
return __context.call(__obj, *args, **kwargs)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/runtime.py", line 262, in call
return __obj(*args, **kwargs)
File "/Users/jake/src/fishtown/dbt/core/dbt/context/common.py", line 364, in load_agate_table
table = dbt.clients.agate_helper.from_csv(path)
File "/Users/jake/src/fishtown/dbt/core/dbt/clients/agate_helper.py", line 65, in from_csv
with open(abspath, encoding='utf-8') as fp:
FileNotFoundError: [Errno 2] No such file or directory: 'data/seedtable.csv'
|
FileNotFoundError
|
def __iter__(self) -> Iterator[FilePath]:
ext = "[!.#~]*" + self.extension
root = self.project.project_root
for result in find_matching(root, self.relative_dirs, ext):
if "searched_path" not in result or "relative_path" not in result:
raise InternalException(
"Invalid result from find_matching: {}".format(result)
)
file_match = FilePath(
searched_path=result["searched_path"],
relative_path=result["relative_path"],
project_root=root,
)
yield file_match
|
def __iter__(self) -> Iterator[FilePath]:
ext = "[!.#~]*" + self.extension
root = self.project.project_root
for result in find_matching(root, self.relative_dirs, ext):
file_match = FilePath(**{k: os.path.normcase(v) for k, v in result.items()})
yield file_match
|
https://github.com/fishtown-analytics/dbt/issues/1723
|
2019-09-04 16:36:25,854704 (MainThread): Acquiring new postgres connection "seedtable".
2019-09-04 16:36:25,854894 (MainThread): Re-using an available connection from the pool (formerly model_2).
2019-09-04 16:36:25,963811 (MainThread): Unhandled error while executing seed.minimal.seedtable
[Errno 2] No such file or directory: 'data/seedtable.csv'
2019-09-04 16:36:25,964249 (MainThread):
Traceback (most recent call last):
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 203, in safe_run
result = self.compile_and_execute(manifest, ctx)
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 146, in compile_and_execute
result = self.run(ctx.node, manifest)
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 245, in run
return self.execute(compiled_node, manifest)
File "/Users/jake/src/fishtown/dbt/core/dbt/node_runners.py", line 357, in execute
materialization_macro.generator(context)()
File "/Users/jake/src/fishtown/dbt/core/dbt/clients/jinja.py", line 127, in call
return macro(*args, **kwargs)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/runtime.py", line 575, in __call__
return self._invoke(arguments, autoescape)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/asyncsupport.py", line 110, in _invoke
return original_invoke(self, arguments, autoescape)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/runtime.py", line 579, in _invoke
rv = self._func(*arguments)
File "<template>", line 346, in macro
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/sandbox.py", line 438, in call
return __context.call(__obj, *args, **kwargs)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36/lib/python3.6/site-packages/jinja2/runtime.py", line 262, in call
return __obj(*args, **kwargs)
File "/Users/jake/src/fishtown/dbt/core/dbt/context/common.py", line 364, in load_agate_table
table = dbt.clients.agate_helper.from_csv(path)
File "/Users/jake/src/fishtown/dbt/core/dbt/clients/agate_helper.py", line 65, in from_csv
with open(abspath, encoding='utf-8') as fp:
FileNotFoundError: [Errno 2] No such file or directory: 'data/seedtable.csv'
|
FileNotFoundError
|
def _parse_column(self, target, column, package_name, root_dir, path, refs):
# this should yield ParsedNodes where resource_type == NodeType.Test
column_name = column["name"]
description = column.get("description", "")
refs.add(column_name, description)
context = {"doc": dbt.context.parser.docs(target, refs.docrefs, column_name)}
get_rendered(description, context)
for test in column.get("tests", []):
try:
yield self.build_test_node(
target, package_name, test, root_dir, path, column_name
)
except dbt.exceptions.CompilationException as exc:
dbt.exceptions.warn_or_error(
"Compilation warning: Invalid test config given in {}:\n\t{}".format(
path, exc.msg
),
None,
)
continue
|
def _parse_column(self, target, column, package_name, root_dir, path, refs):
# this should yield ParsedNodes where resource_type == NodeType.Test
column_name = column["name"]
description = column.get("description", "")
refs.add(column_name, description)
context = {"doc": dbt.context.parser.docs(target, refs.docrefs, column_name)}
get_rendered(description, context)
for test in column.get("tests", []):
try:
yield self.build_test_node(
target, package_name, test, root_dir, path, column_name
)
except dbt.exceptions.CompilationException as exc:
dbt.exceptions.warn_or_error("in {}: {}".format(path, exc.msg), None)
continue
|
https://github.com/fishtown-analytics/dbt/issues/1325
|
2019-02-27 12:11:45,010 (MainThread): Parsing test.dbt_utils.at_least_one_eng_active_subs_next_fy_earliest_subscription_engagement_date
2019-02-27 12:11:45,014 (MainThread): Flushing usage events
2019-02-27 12:11:45,014 (MainThread): Encountered an error:
2019-02-27 12:11:45,014 (MainThread): list indices must be integers or slices, not dict
2019-02-27 12:11:45,019 (MainThread): Traceback (most recent call last):
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/main.py", line 77, in main
results, succeeded = handle_and_check(args)
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/main.py", line 151, in handle_and_check
task, res = run_from_args(parsed)
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/main.py", line 207, in run_from_args
results = run_from_task(task, cfg, parsed)
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/main.py", line 215, in run_from_task
result = task.run()
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/task/test.py", line 43, in run
results = RunManager(self.config, query, TestRunner).run()
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/runner.py", line 37, in __init__
manifest, linker = self.compile(self.config)
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/runner.py", line 216, in compile
return compiler.compile()
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/compilation.py", line 246, in compile
manifest = dbt.loader.GraphLoader.load_all(self.config, all_projects)
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/loader.py", line 140, in load_all
return cls(project_config, all_projects).load()
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/loader.py", line 122, in load
self._load_schema_tests()
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/loader.py", line 101, in _load_schema_tests
relative_dirs=project.source_paths
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/parser/schemas.py", line 459, in load_and_parse
for result_type, node in v2_results:
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/parser/schemas.py", line 381, in parse_v2_yml
for node_type, node in iterator:
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/parser/schemas.py", line 421, in parse_model
test_args, root_dir, path)
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/parser/schemas.py", line 226, in build_node
original_file_path)
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/parser/schemas.py", line 159, in build_unparsed_node
test_args)
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/parser/schemas.py", line 75, in build_test_raw_sql
kwargs = [as_kwarg(key, test_args[key]) for key in sorted(test_args)]
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/parser/schemas.py", line 75, in <listcomp>
kwargs = [as_kwarg(key, test_args[key]) for key in sorted(test_args)]
TypeError: list indices must be integers or slices, not dict
|
TypeError
|
def parse_models_entry(self, model_dict, path, package_name, root_dir):
model_name = model_dict["name"]
refs = ParserRef()
for column in model_dict.get("columns", []):
column_tests = self._parse_column(
model_dict, column, package_name, root_dir, path, refs
)
for node in column_tests:
yield "test", node
for test in model_dict.get("tests", []):
try:
node = self.build_test_node(model_dict, package_name, test, root_dir, path)
except dbt.exceptions.CompilationException as exc:
dbt.exceptions.warn_or_error(
"Compilation warning: Invalid test config given in {}:\n\t{}".format(
path, exc.msg
),
None,
)
continue
yield "test", node
context = {"doc": dbt.context.parser.docs(model_dict, refs.docrefs)}
description = model_dict.get("description", "")
get_rendered(description, context)
patch = ParsedNodePatch(
name=model_name,
original_file_path=path,
description=description,
columns=refs.column_info,
docrefs=refs.docrefs,
)
yield "patch", patch
|
def parse_models_entry(self, model_dict, path, package_name, root_dir):
model_name = model_dict["name"]
refs = ParserRef()
for column in model_dict.get("columns", []):
column_tests = self._parse_column(
model_dict, column, package_name, root_dir, path, refs
)
for node in column_tests:
yield "test", node
for test in model_dict.get("tests", []):
try:
node = self.build_test_node(model_dict, package_name, test, root_dir, path)
except dbt.exceptions.CompilationException as exc:
dbt.exceptions.warn_or_error("in {}: {}".format(path, exc.msg), test)
continue
yield "test", node
context = {"doc": dbt.context.parser.docs(model_dict, refs.docrefs)}
description = model_dict.get("description", "")
get_rendered(description, context)
patch = ParsedNodePatch(
name=model_name,
original_file_path=path,
description=description,
columns=refs.column_info,
docrefs=refs.docrefs,
)
yield "patch", patch
|
https://github.com/fishtown-analytics/dbt/issues/1325
|
2019-02-27 12:11:45,010 (MainThread): Parsing test.dbt_utils.at_least_one_eng_active_subs_next_fy_earliest_subscription_engagement_date
2019-02-27 12:11:45,014 (MainThread): Flushing usage events
2019-02-27 12:11:45,014 (MainThread): Encountered an error:
2019-02-27 12:11:45,014 (MainThread): list indices must be integers or slices, not dict
2019-02-27 12:11:45,019 (MainThread): Traceback (most recent call last):
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/main.py", line 77, in main
results, succeeded = handle_and_check(args)
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/main.py", line 151, in handle_and_check
task, res = run_from_args(parsed)
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/main.py", line 207, in run_from_args
results = run_from_task(task, cfg, parsed)
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/main.py", line 215, in run_from_task
result = task.run()
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/task/test.py", line 43, in run
results = RunManager(self.config, query, TestRunner).run()
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/runner.py", line 37, in __init__
manifest, linker = self.compile(self.config)
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/runner.py", line 216, in compile
return compiler.compile()
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/compilation.py", line 246, in compile
manifest = dbt.loader.GraphLoader.load_all(self.config, all_projects)
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/loader.py", line 140, in load_all
return cls(project_config, all_projects).load()
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/loader.py", line 122, in load
self._load_schema_tests()
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/loader.py", line 101, in _load_schema_tests
relative_dirs=project.source_paths
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/parser/schemas.py", line 459, in load_and_parse
for result_type, node in v2_results:
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/parser/schemas.py", line 381, in parse_v2_yml
for node_type, node in iterator:
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/parser/schemas.py", line 421, in parse_model
test_args, root_dir, path)
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/parser/schemas.py", line 226, in build_node
original_file_path)
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/parser/schemas.py", line 159, in build_unparsed_node
test_args)
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/parser/schemas.py", line 75, in build_test_raw_sql
kwargs = [as_kwarg(key, test_args[key]) for key in sorted(test_args)]
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/parser/schemas.py", line 75, in <listcomp>
kwargs = [as_kwarg(key, test_args[key]) for key in sorted(test_args)]
TypeError: list indices must be integers or slices, not dict
|
TypeError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.