after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def accept_worker():
while True:
try:
sock, (other_host, other_port) = listener.accept()
except OSError:
# Listener socket has been closed.
break
log.info(
"Accepted incoming {0} connection from {1}:{2}.",
name,
other_host,
other_port,
)
handler(sock)
|
def accept_worker():
while True:
try:
sock, (other_host, other_port) = cls.listener.accept()
except OSError:
# Listener socket has been closed.
break
log.info(
"Accepted incoming {0} connection from {1}:{2}.",
name,
other_host,
other_port,
)
cls(sock)
|
https://github.com/microsoft/debugpy/issues/20
|
C:\Python37\python.exe c:\Users\kanadig\.vscode\extensions\ms-python.python-2020.2.60897-dev\pythonFiles\lib\python\new_ptvsd\wheels\ptvsd\launcher C:\GIT\issues\issue9214/test_mp.py
Could not connect to 127.0.0.1: 50734
Traceback (most recent call last):
File "c:\Users\kanadig\.vscode\extensions\ms-python.python-2020.2.60897-dev\pythonFiles\lib\python\new_ptvsd\wheels\ptvsd\_vendored\pydevd\_pydevd_bundle\pydevd_comm.py", line 514, in start_client
s.connect((host, port))
ConnectionRefusedError: [WinError 10061] No connection could be made because the target machine actively refused it
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "c:\Users\kanadig\.vscode\extensions\ms-python.python-2020.2.60897-dev\pythonFiles\lib\python\new_ptvsd\wheels\ptvsd\_vendored\pydevd\pydevd.py", line 2550, in settrace
client_access_token,
File "c:\Users\kanadig\.vscode\extensions\ms-python.python-2020.2.60897-dev\pythonFiles\lib\python\new_ptvsd\wheels\ptvsd\_vendored\pydevd\pydevd.py", line 2610, in _locked_settrace
py_db.connect(host, port) # Note: connect can raise error.
File "c:\Users\kanadig\.vscode\extensions\ms-python.python-2020.2.60897-dev\pythonFiles\lib\python\new_ptvsd\wheels\ptvsd\_vendored\pydevd\pydevd.py", line 1179, in connect
s = start_client(host, port)
File "c:\Users\kanadig\.vscode\extensions\ms-python.python-2020.2.60897-dev\pythonFiles\lib\python\new_ptvsd\wheels\ptvsd\_vendored\pydevd\_pydevd_bundle\pydevd_comm.py", line 514, in start_client
s.connect((host, port))
ConnectionRefusedError: [WinError 10061] No connection could be made because the target machine actively refused it
Error in atexit._run_exitfuncs:
Traceback (most recent call last):
File "C:\Python37\lib\concurrent\futures\process.py", line 102, in _python_exit
thread_wakeup.wakeup()
File "C:\Python37\lib\concurrent\futures\process.py", line 90, in wakeup
self._writer.send_bytes(b"")
File "C:\Python37\lib\multiprocessing\connection.py", line 183, in send_bytes
self._check_closed()
File "C:\Python37\lib\multiprocessing\connection.py", line 136, in _check_closed
raise OSError("handle is closed")
OSError: handle is closed
|
ConnectionRefusedError
|
def add_policy(bundle):
request_inputs = anchore_engine.apis.do_request_prep(request, default_params={})
bodycontent = request_inputs["bodycontent"]
params = request_inputs["params"]
return_object = []
httpcode = 500
userId = request_inputs["userId"]
try:
logger.debug("Adding policy")
client = internal_client_for(CatalogClient, request_inputs["userId"])
jsondata = json.loads(bodycontent)
# schema check
try:
localconfig = anchore_engine.configuration.localconfig.get_config()
user_auth = localconfig["system_user_auth"]
verify = localconfig.get("internal_ssl_verify", True)
p_client = internal_client_for(PolicyEngineClient, userId=userId)
response = p_client.validate_bundle(jsondata)
if not response.get("valid", False):
httpcode = 400
return_object = anchore_engine.common.helpers.make_response_error(
"Bundle failed validation",
in_httpcode=400,
detail=response.get("validation_details"),
)
return (return_object, httpcode)
except Exception as err:
raise Exception(
"Error response from policy service during bundle validation. Validation could not be performed: {}".format(
err
)
)
if "id" in jsondata and jsondata["id"]:
policyId = jsondata["id"]
else:
policyId = hashlib.md5(
str(userId + ":" + jsondata["name"]).encode("utf8")
).hexdigest()
jsondata["id"] = policyId
try:
policybundle = jsondata
policy_record = client.add_policy(policybundle)
except Exception as err:
raise Exception(
"cannot store policy data to catalog - exception: " + str(err)
)
if policy_record:
return_object = make_response_policy(policy_record, params)
httpcode = 200
else:
raise Exception("failed to add policy to catalog DB")
except Exception as err:
logger.debug("operation exception: " + str(err))
return_object = anchore_engine.common.helpers.make_response_error(
err, in_httpcode=httpcode
)
httpcode = return_object["httpcode"]
return (return_object, httpcode)
|
def add_policy(bundle):
request_inputs = anchore_engine.apis.do_request_prep(request, default_params={})
bodycontent = request_inputs["bodycontent"]
params = request_inputs["params"]
return_object = []
httpcode = 500
userId = request_inputs["userId"]
try:
logger.debug("Adding policy")
client = internal_client_for(CatalogClient, request_inputs["userId"])
jsondata = json.loads(bodycontent)
# schema check
try:
localconfig = anchore_engine.configuration.localconfig.get_config()
user_auth = localconfig["system_user_auth"]
verify = localconfig.get("internal_ssl_verify", True)
p_client = internal_client_for(PolicyEngineClient, userId=userId)
response = p_client.validate_bundle(jsondata)
if not response.get("valid", False):
httpcode = 400
return_object = anchore_engine.common.helpers.make_response_error(
"Bundle failed validation",
in_httpcode=400,
detail={
"validation_details": [
x.to_dict() for x in response.validation_details
]
},
)
return (return_object, httpcode)
except Exception as err:
raise Exception(
"Error response from policy service during bundle validation. Validation could not be performed: {}".format(
err
)
)
if "id" in jsondata and jsondata["id"]:
policyId = jsondata["id"]
else:
policyId = hashlib.md5(
str(userId + ":" + jsondata["name"]).encode("utf8")
).hexdigest()
jsondata["id"] = policyId
try:
policybundle = jsondata
policy_record = client.add_policy(policybundle)
except Exception as err:
raise Exception(
"cannot store policy data to catalog - exception: " + str(err)
)
if policy_record:
return_object = make_response_policy(policy_record, params)
httpcode = 200
else:
raise Exception("failed to add policy to catalog DB")
except Exception as err:
logger.debug("operation exception: " + str(err))
return_object = anchore_engine.common.helpers.make_response_error(
err, in_httpcode=httpcode
)
httpcode = return_object["httpcode"]
return (return_object, httpcode)
|
https://github.com/anchore/anchore-engine/issues/124
|
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gate.py", line 234, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] self.evaluate(image_obj, context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gates/image_metadata.py", line 59, in evaluate
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] rval = type(img_val)(rval)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] ValueError: invalid literal for int() with base 10: '4gb'
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 453, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] self.configured_trigger.execute(image_obj, exec_context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gate.py", line 237, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] raise TriggerEvaluationError(trigger=self, message='Error executing gate {} trigger {} with params: {}. Msg: {}'.format(self.gate_cls.__gate_name__, self.__trigger_name__, self.eval_params, e.message))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] AttributeError: 'ValueError' object has no attribute 'message'
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/api/controllers/synchronous_operations.py", line 481, in check_user_image_inline
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] eval_result = executable_bundle.execute(img_obj, tag, ExecutionContext(db_session=db, configuration={}))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 1260, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] bundle_exec = self._process_mapping_result(bundle_exec, image_object, tag, context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 1207, in _process_mapping_result
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] errors, policy_decision = evaluated_policy.execute(image_obj=image_object, context=context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 596, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] errs, matches = rule.execute(image_obj=image_obj, exec_context=exec_context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 458, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] raise TriggerEvaluationError(trigger=self.configured_trigger, message='Could not evaluate trigger due to error in evaluation execution')
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] anchore_engine.services.policy_engine.engine.policy.exceptions.TriggerEvaluationError: TriggerEvaluationError: severity:error message:Trigger evaluation failed for gate metadata and trigger attribute, with parameters: ([]) due to: Could not evaluate trigger due to error in evaluation execution
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/apis/authorization.py", line 339, in inner_wrapper
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] return f(*args, **kwargs)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/api/controllers/synchronous_operations.py", line 484, in check_user_image_inline
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] abort(Response(response='Cannot execute given policy against the image due to errors executing the policy bundle: {}'.format(e.message), status=500))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/werkzeug/exceptions.py", line 707, in abort
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] return _aborter(status, *args, **kwargs)
|
ValueError
|
def delete_image(user_id, image_id):
"""
DELETE the image and all resources for it. Returns 204 - No Content on success
:param user_id:
:param image_id:
:return:
"""
db = get_session()
try:
log.info(
"Deleting image {}/{} and all associated resources".format(
user_id, image_id
)
)
img = db.query(Image).get((image_id, user_id))
if img:
for pkg_vuln in img.vulnerabilities():
db.delete(pkg_vuln)
# for pkg_vuln in img.java_vulnerabilities():
# db.delete(pkg_vuln)
try:
mgr = EvaluationCacheManager(img, None, None)
mgr.flush()
except Exception as ex:
log.exception(
"Could not delete evaluations for image {}/{} in the cache. May be orphaned".format(
user_id, image_id
)
)
db.delete(img)
db.commit()
else:
db.rollback()
# Idempotently return 204. This isn't properly RESTY, but idempotency on delete makes clients much cleaner.
return (None, 204)
except HTTPException:
raise
except Exception as e:
log.exception(
"Error processing DELETE request for image {}/{}".format(user_id, image_id)
)
db.rollback()
return make_response_error(
"Error deleting image {}/{}: {}".format(user_id, image_id, e),
in_httpcode=500,
), 500
|
def delete_image(user_id, image_id):
"""
DELETE the image and all resources for it. Returns 204 - No Content on success
:param user_id:
:param image_id:
:return:
"""
db = get_session()
try:
log.info(
"Deleting image {}/{} and all associated resources".format(
user_id, image_id
)
)
img = db.query(Image).get((image_id, user_id))
if img:
for pkg_vuln in img.vulnerabilities():
db.delete(pkg_vuln)
# for pkg_vuln in img.java_vulnerabilities():
# db.delete(pkg_vuln)
try:
mgr = EvaluationCacheManager(img, None, None)
mgr.flush()
except Exception as ex:
log.exception(
"Could not delete evaluations for image {}/{} in the cache. May be orphaned".format(
user_id, image_id
)
)
db.delete(img)
db.commit()
else:
db.rollback()
# Idempotently return 204. This isn't properly RESTY, but idempotency on delete makes clients much cleaner.
return (None, 204)
except HTTPException:
raise
except Exception:
log.exception(
"Error processing DELETE request for image {}/{}".format(user_id, image_id)
)
db.rollback()
abort(500)
|
https://github.com/anchore/anchore-engine/issues/124
|
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gate.py", line 234, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] self.evaluate(image_obj, context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gates/image_metadata.py", line 59, in evaluate
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] rval = type(img_val)(rval)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] ValueError: invalid literal for int() with base 10: '4gb'
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 453, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] self.configured_trigger.execute(image_obj, exec_context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gate.py", line 237, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] raise TriggerEvaluationError(trigger=self, message='Error executing gate {} trigger {} with params: {}. Msg: {}'.format(self.gate_cls.__gate_name__, self.__trigger_name__, self.eval_params, e.message))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] AttributeError: 'ValueError' object has no attribute 'message'
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/api/controllers/synchronous_operations.py", line 481, in check_user_image_inline
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] eval_result = executable_bundle.execute(img_obj, tag, ExecutionContext(db_session=db, configuration={}))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 1260, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] bundle_exec = self._process_mapping_result(bundle_exec, image_object, tag, context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 1207, in _process_mapping_result
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] errors, policy_decision = evaluated_policy.execute(image_obj=image_object, context=context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 596, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] errs, matches = rule.execute(image_obj=image_obj, exec_context=exec_context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 458, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] raise TriggerEvaluationError(trigger=self.configured_trigger, message='Could not evaluate trigger due to error in evaluation execution')
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] anchore_engine.services.policy_engine.engine.policy.exceptions.TriggerEvaluationError: TriggerEvaluationError: severity:error message:Trigger evaluation failed for gate metadata and trigger attribute, with parameters: ([]) due to: Could not evaluate trigger due to error in evaluation execution
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/apis/authorization.py", line 339, in inner_wrapper
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] return f(*args, **kwargs)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/api/controllers/synchronous_operations.py", line 484, in check_user_image_inline
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] abort(Response(response='Cannot execute given policy against the image due to errors executing the policy bundle: {}'.format(e.message), status=500))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/werkzeug/exceptions.py", line 707, in abort
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] return _aborter(status, *args, **kwargs)
|
ValueError
|
def check_user_image_inline(user_id, image_id, tag, bundle):
"""
Execute a policy evaluation using the info in the request body including the bundle content
:param user_id:
:param image_id:
:param tag:
:param bundle:
:return:
"""
timer = time.time()
db = get_session()
cache_mgr = None
try:
# Input validation
if tag is None:
# set tag value to a value that only matches wildcards
tag = "*/*:*"
try:
img_obj = db.query(Image).get((image_id, user_id))
except:
return make_response_error("Image not found", in_httpcode=404), 404
if not img_obj:
log.info(
"Request for evaluation of image that cannot be found: user_id = {}, image_id = {}".format(
user_id, image_id
)
)
return make_response_error("Image not found", in_httpcode=404), 404
if evaluation_cache_enabled:
timer2 = time.time()
try:
try:
cache_mgr = EvaluationCacheManager(img_obj, tag, bundle)
except ValueError as err:
log.warn(
"Could not leverage cache due to error in bundle data: {}".format(
err
)
)
cache_mgr = None
if cache_mgr is None:
log.info(
"Could not initialize cache manager for policy evaluation, skipping cache usage"
)
else:
cached_result = cache_mgr.refresh()
if cached_result:
metrics.counter_inc(name="anchore_policy_evaluation_cache_hits")
metrics.histogram_observe(
"anchore_policy_evaluation_cache_access_latency",
time.time() - timer2,
status="hit",
)
log.info(
"Returning cached result of policy evaluation for {}/{}, with tag {} and bundle {} with digest {}. Last evaluation: {}".format(
user_id,
image_id,
tag,
cache_mgr.bundle_id,
cache_mgr.bundle_digest,
cached_result.get("last_modified"),
)
)
return cached_result
else:
metrics.counter_inc(
name="anchore_policy_evaluation_cache_misses"
)
metrics.histogram_observe(
"anchore_policy_evaluation_cache_access_latency",
time.time() - timer2,
status="miss",
)
log.info(
"Policy evaluation not cached, or invalid, executing evaluation for {}/{} with tag {} and bundle {} with digest {}".format(
user_id,
image_id,
tag,
cache_mgr.bundle_id,
cache_mgr.bundle_digest,
)
)
except Exception as ex:
log.exception(
"Unexpected error operating on policy evaluation cache. Skipping use of cache."
)
else:
log.info("Policy evaluation cache disabled. Executing evaluation")
# Build bundle exec.
problems = []
executable_bundle = None
try:
# Allow deprecated gates here to support upgrade cases from old policy bundles.
executable_bundle = build_bundle(bundle, for_tag=tag, allow_deprecated=True)
if executable_bundle.init_errors:
problems = executable_bundle.init_errors
except InitializationError as e:
log.exception("Bundle construction and initialization returned errors")
problems = e.causes
eval_result = None
if not problems:
# Execute bundle
try:
eval_result = executable_bundle.execute(
img_obj, tag, ExecutionContext(db_session=db, configuration={})
)
except Exception as e:
log.exception(
"Error executing policy bundle {} against image {} w/tag {}: {}".format(
bundle["id"], image_id, tag, e
)
)
return make_response_error(
"Internal bundle evaluation error",
detail="Cannot execute given policy against the image due to errors executing the policy bundle: {}".format(
e
),
in_httpcode=500,
), 500
else:
# Construct a failure eval with details on the errors and mappings to send to client
eval_result = build_empty_error_execution(
img_obj, tag, executable_bundle, errors=problems, warnings=[]
)
if (
executable_bundle
and executable_bundle.mapping
and len(executable_bundle.mapping.mapping_rules) == 1
):
eval_result.executed_mapping = executable_bundle.mapping.mapping_rules[
0
]
resp = PolicyEvaluation()
resp.user_id = user_id
resp.image_id = image_id
resp.tag = tag
resp.bundle = bundle
resp.matched_mapping_rule = (
eval_result.executed_mapping.json()
if eval_result.executed_mapping
else False
)
resp.last_modified = int(time.time())
resp.final_action = eval_result.bundle_decision.final_decision.name
resp.final_action_reason = eval_result.bundle_decision.reason
resp.matched_whitelisted_images_rule = (
eval_result.bundle_decision.whitelisted_image.json()
if eval_result.bundle_decision.whitelisted_image
else False
)
resp.matched_blacklisted_images_rule = (
eval_result.bundle_decision.blacklisted_image.json()
if eval_result.bundle_decision.blacklisted_image
else False
)
resp.result = eval_result.as_table_json()
resp.created_at = int(time.time())
resp.evaluation_problems = [
problem_from_exception(i) for i in eval_result.errors
]
resp.evaluation_problems += [
problem_from_exception(i) for i in eval_result.warnings
]
if resp.evaluation_problems:
for i in resp.evaluation_problems:
log.warn(
"Returning evaluation response for image {}/{} w/tag {} and bundle {} that contains error: {}".format(
user_id, image_id, tag, bundle["id"], json.dumps(i.to_dict())
)
)
metrics.histogram_observe(
"anchore_policy_evaluation_time_seconds",
time.time() - timer,
status="fail",
)
else:
metrics.histogram_observe(
"anchore_policy_evaluation_time_seconds",
time.time() - timer,
status="success",
)
result = resp.to_dict()
# Never let the cache block returning results
try:
if evaluation_cache_enabled and cache_mgr is not None:
cache_mgr.save(result)
except Exception as ex:
log.exception(
"Failed saving policy result in cache. Skipping and continuing."
)
db.commit()
return result
except HTTPException as e:
db.rollback()
log.exception("Caught exception in execution: {}".format(e))
raise
except Exception as e:
db.rollback()
log.exception("Failed processing bundle evaluation: {}".format(e))
return make_response_error(
"Unexpected internal error", detail=str(e), in_httpcode=500
), 500
finally:
db.close()
|
def check_user_image_inline(user_id, image_id, tag, bundle):
"""
Execute a policy evaluation using the info in the request body including the bundle content
:param user_id:
:param image_id:
:param tag:
:param bundle:
:return:
"""
timer = time.time()
db = get_session()
cache_mgr = None
try:
# Input validation
if tag is None:
# set tag value to a value that only matches wildcards
tag = "*/*:*"
try:
img_obj = db.query(Image).get((image_id, user_id))
except:
abort(Response(response="Image not found", status=404))
if not img_obj:
log.info(
"Request for evaluation of image that cannot be found: user_id = {}, image_id = {}".format(
user_id, image_id
)
)
abort(Response(response="Image not found", status=404))
if evaluation_cache_enabled:
timer2 = time.time()
try:
try:
cache_mgr = EvaluationCacheManager(img_obj, tag, bundle)
except ValueError as err:
log.warn(
"Could not leverage cache due to error in bundle data: {}".format(
err
)
)
cache_mgr = None
if cache_mgr is None:
log.info(
"Could not initialize cache manager for policy evaluation, skipping cache usage"
)
else:
cached_result = cache_mgr.refresh()
if cached_result:
metrics.counter_inc(name="anchore_policy_evaluation_cache_hits")
metrics.histogram_observe(
"anchore_policy_evaluation_cache_access_latency",
time.time() - timer2,
status="hit",
)
log.info(
"Returning cached result of policy evaluation for {}/{}, with tag {} and bundle {} with digest {}. Last evaluation: {}".format(
user_id,
image_id,
tag,
cache_mgr.bundle_id,
cache_mgr.bundle_digest,
cached_result.get("last_modified"),
)
)
return cached_result
else:
metrics.counter_inc(
name="anchore_policy_evaluation_cache_misses"
)
metrics.histogram_observe(
"anchore_policy_evaluation_cache_access_latency",
time.time() - timer2,
status="miss",
)
log.info(
"Policy evaluation not cached, or invalid, executing evaluation for {}/{} with tag {} and bundle {} with digest {}".format(
user_id,
image_id,
tag,
cache_mgr.bundle_id,
cache_mgr.bundle_digest,
)
)
except Exception as ex:
log.exception(
"Unexpected error operating on policy evaluation cache. Skipping use of cache."
)
else:
log.info("Policy evaluation cache disabled. Executing evaluation")
# Build bundle exec.
problems = []
executable_bundle = None
try:
# Allow deprecated gates here to support upgrade cases from old policy bundles.
executable_bundle = build_bundle(bundle, for_tag=tag, allow_deprecated=True)
if executable_bundle.init_errors:
problems = executable_bundle.init_errors
except InitializationError as e:
log.exception("Bundle construction and initialization returned errors")
problems = e.causes
eval_result = None
if not problems:
# Execute bundle
try:
eval_result = executable_bundle.execute(
img_obj, tag, ExecutionContext(db_session=db, configuration={})
)
except Exception as e:
log.exception(
"Error executing policy bundle {} against image {} w/tag {}: {}".format(
bundle["id"], image_id, tag, e.message
)
)
abort(
Response(
response="Cannot execute given policy against the image due to errors executing the policy bundle: {}".format(
e.message
),
status=500,
)
)
else:
# Construct a failure eval with details on the errors and mappings to send to client
eval_result = build_empty_error_execution(
img_obj, tag, executable_bundle, errors=problems, warnings=[]
)
if (
executable_bundle
and executable_bundle.mapping
and len(executable_bundle.mapping.mapping_rules) == 1
):
eval_result.executed_mapping = executable_bundle.mapping.mapping_rules[
0
]
resp = PolicyEvaluation()
resp.user_id = user_id
resp.image_id = image_id
resp.tag = tag
resp.bundle = bundle
resp.matched_mapping_rule = (
eval_result.executed_mapping.json()
if eval_result.executed_mapping
else False
)
resp.last_modified = int(time.time())
resp.final_action = eval_result.bundle_decision.final_decision.name
resp.final_action_reason = eval_result.bundle_decision.reason
resp.matched_whitelisted_images_rule = (
eval_result.bundle_decision.whitelisted_image.json()
if eval_result.bundle_decision.whitelisted_image
else False
)
resp.matched_blacklisted_images_rule = (
eval_result.bundle_decision.blacklisted_image.json()
if eval_result.bundle_decision.blacklisted_image
else False
)
resp.result = eval_result.as_table_json()
resp.created_at = int(time.time())
resp.evaluation_problems = [
problem_from_exception(i) for i in eval_result.errors
]
resp.evaluation_problems += [
problem_from_exception(i) for i in eval_result.warnings
]
if resp.evaluation_problems:
for i in resp.evaluation_problems:
log.warn(
"Returning evaluation response for image {}/{} w/tag {} and bundle {} that contains error: {}".format(
user_id, image_id, tag, bundle["id"], json.dumps(i.to_dict())
)
)
metrics.histogram_observe(
"anchore_policy_evaluation_time_seconds",
time.time() - timer,
status="fail",
)
else:
metrics.histogram_observe(
"anchore_policy_evaluation_time_seconds",
time.time() - timer,
status="success",
)
result = resp.to_dict()
# Never let the cache block returning results
try:
if evaluation_cache_enabled and cache_mgr is not None:
cache_mgr.save(result)
except Exception as ex:
log.exception(
"Failed saving policy result in cache. Skipping and continuing."
)
db.commit()
return result
except HTTPException as e:
db.rollback()
log.exception("Caught exception in execution: {}".format(e))
raise
except Exception as e:
db.rollback()
log.exception("Failed processing bundle evaluation: {}".format(e))
abort(Response("Unexpected internal error", 500))
finally:
db.close()
|
https://github.com/anchore/anchore-engine/issues/124
|
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gate.py", line 234, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] self.evaluate(image_obj, context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gates/image_metadata.py", line 59, in evaluate
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] rval = type(img_val)(rval)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] ValueError: invalid literal for int() with base 10: '4gb'
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 453, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] self.configured_trigger.execute(image_obj, exec_context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gate.py", line 237, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] raise TriggerEvaluationError(trigger=self, message='Error executing gate {} trigger {} with params: {}. Msg: {}'.format(self.gate_cls.__gate_name__, self.__trigger_name__, self.eval_params, e.message))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] AttributeError: 'ValueError' object has no attribute 'message'
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/api/controllers/synchronous_operations.py", line 481, in check_user_image_inline
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] eval_result = executable_bundle.execute(img_obj, tag, ExecutionContext(db_session=db, configuration={}))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 1260, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] bundle_exec = self._process_mapping_result(bundle_exec, image_object, tag, context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 1207, in _process_mapping_result
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] errors, policy_decision = evaluated_policy.execute(image_obj=image_object, context=context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 596, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] errs, matches = rule.execute(image_obj=image_obj, exec_context=exec_context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 458, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] raise TriggerEvaluationError(trigger=self.configured_trigger, message='Could not evaluate trigger due to error in evaluation execution')
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] anchore_engine.services.policy_engine.engine.policy.exceptions.TriggerEvaluationError: TriggerEvaluationError: severity:error message:Trigger evaluation failed for gate metadata and trigger attribute, with parameters: ([]) due to: Could not evaluate trigger due to error in evaluation execution
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/apis/authorization.py", line 339, in inner_wrapper
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] return f(*args, **kwargs)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/api/controllers/synchronous_operations.py", line 484, in check_user_image_inline
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] abort(Response(response='Cannot execute given policy against the image due to errors executing the policy bundle: {}'.format(e.message), status=500))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/werkzeug/exceptions.py", line 707, in abort
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] return _aborter(status, *args, **kwargs)
|
ValueError
|
def get_image_vulnerabilities(user_id, image_id, force_refresh=False, vendor_only=True):
"""
Return the vulnerability listing for the specified image and load from catalog if not found and specifically asked
to do so.
Example json output:
{
"multi" : {
"url_column_index" : 7,
"result" : {
"rows" : [],
"rowcount" : 0,
"colcount" : 8,
"header" : [
"CVE_ID",
"Severity",
"*Total_Affected",
"Vulnerable_Package",
"Fix_Available",
"Fix_Images",
"Rebuild_Images",
"URL"
]
},
"querycommand" : "/usr/lib/python2.7/site-packages/anchore/anchore-modules/multi-queries/cve-scan.py /ebs_data/anchore/querytmp/queryimages.7026386 /ebs_data/anchore/data /ebs_data/anchore/querytmp/query.59057288 all",
"queryparams" : "all",
"warns" : [
"0005b136f0fb (prom/prometheus:master) cannot perform CVE scan: no CVE data is currently available for the detected base distro type (busybox:unknown_version,busybox:v1.26.2)"
]
}
}
:param user_id: user id of image to evaluate
:param image_id: image id to evaluate
:param force_refresh: if true, flush and recompute vulnerabilities rather than returning current values
:param vendor_only: if true, filter out the vulnerabilities that vendors will explicitly not address
:return:
"""
# Has image?
db = get_session()
try:
img = db.query(Image).get((image_id, user_id))
vulns = []
if not img:
abort(404)
else:
if force_refresh:
log.info(
"Forcing refresh of vulnerabiltiies for {}/{}".format(
user_id, image_id
)
)
try:
vulns = rescan_image(img, db_session=db)
db.commit()
except Exception as e:
log.exception(
"Error refreshing cve matches for image {}/{}".format(
user_id, image_id
)
)
db.rollback()
return make_response_error(
"Error refreshing vulnerability listing for image.",
in_httpcode=500,
)
db = get_session()
db.refresh(img)
vulns = img.vulnerabilities()
# Has vulnerabilities?
warns = []
if not vulns:
vulns = []
ns = DistroNamespace.for_obj(img)
if not have_vulnerabilities_for(ns):
warns = [
"No vulnerability data available for image distro: {}".format(
ns.namespace_name
)
]
rows = []
for vuln in vulns:
# Skip the vulnerability if the vendor_only flag is set to True and the issue won't be addressed by the vendor
if vendor_only and vuln.fix_has_no_advisory():
continue
cves = ""
if vuln.vulnerability.additional_metadata:
cves = " ".join(vuln.vulnerability.additional_metadata.get("cves", []))
rows.append(
[
vuln.vulnerability_id,
vuln.vulnerability.severity,
1,
vuln.pkg_name + "-" + vuln.package.fullversion,
str(vuln.fixed_in()),
vuln.pkg_image_id,
"None", # Always empty this for now
vuln.vulnerability.link,
vuln.pkg_type,
"vulnerabilities",
vuln.vulnerability.namespace_name,
vuln.pkg_name,
vuln.package.fullversion,
cves,
]
)
vuln_listing = {
"multi": {
"url_column_index": 7,
"result": {
"header": TABLE_STYLE_HEADER_LIST,
"rowcount": len(rows),
"colcount": len(TABLE_STYLE_HEADER_LIST),
"rows": rows,
},
"warns": warns,
}
}
cpe_vuln_listing = []
try:
all_cpe_matches = img.cpe_vulnerabilities()
if not all_cpe_matches:
all_cpe_matches = []
cpe_hashes = {}
for image_cpe, vulnerability_cpe in all_cpe_matches:
cpe_vuln_el = {
"vulnerability_id": vulnerability_cpe.vulnerability_id,
"severity": vulnerability_cpe.severity,
"link": vulnerability_cpe.link,
"pkg_type": image_cpe.pkg_type,
"pkg_path": image_cpe.pkg_path,
"name": image_cpe.name,
"version": image_cpe.version,
"cpe": image_cpe.get_cpestring(),
"feed_name": vulnerability_cpe.feed_name,
"feed_namespace": vulnerability_cpe.namespace_name,
}
cpe_hash = hashlib.sha256(
utils.ensure_bytes(json.dumps(cpe_vuln_el))
).hexdigest()
if not cpe_hashes.get(cpe_hash, False):
cpe_vuln_listing.append(cpe_vuln_el)
cpe_hashes[cpe_hash] = True
except Exception as err:
log.warn("could not fetch CPE matches - exception: " + str(err))
report = LegacyVulnerabilityReport.from_dict(vuln_listing)
resp = ImageVulnerabilityListing(
user_id=user_id,
image_id=image_id,
legacy_report=report,
cpe_report=cpe_vuln_listing,
)
return resp.to_dict()
except HTTPException:
db.rollback()
raise
except Exception as e:
log.exception(
"Error checking image {}, {} for vulnerabiltiies. Rolling back".format(
user_id, image_id
)
)
db.rollback()
abort(500)
finally:
db.close()
|
def get_image_vulnerabilities(user_id, image_id, force_refresh=False, vendor_only=True):
"""
Return the vulnerability listing for the specified image and load from catalog if not found and specifically asked
to do so.
Example json output:
{
"multi" : {
"url_column_index" : 7,
"result" : {
"rows" : [],
"rowcount" : 0,
"colcount" : 8,
"header" : [
"CVE_ID",
"Severity",
"*Total_Affected",
"Vulnerable_Package",
"Fix_Available",
"Fix_Images",
"Rebuild_Images",
"URL"
]
},
"querycommand" : "/usr/lib/python2.7/site-packages/anchore/anchore-modules/multi-queries/cve-scan.py /ebs_data/anchore/querytmp/queryimages.7026386 /ebs_data/anchore/data /ebs_data/anchore/querytmp/query.59057288 all",
"queryparams" : "all",
"warns" : [
"0005b136f0fb (prom/prometheus:master) cannot perform CVE scan: no CVE data is currently available for the detected base distro type (busybox:unknown_version,busybox:v1.26.2)"
]
}
}
:param user_id: user id of image to evaluate
:param image_id: image id to evaluate
:param force_refresh: if true, flush and recompute vulnerabilities rather than returning current values
:param vendor_only: if true, filter out the vulnerabilities that vendors will explicitly not address
:return:
"""
# Has image?
db = get_session()
try:
img = db.query(Image).get((image_id, user_id))
vulns = []
if not img:
abort(404)
else:
if force_refresh:
log.info(
"Forcing refresh of vulnerabiltiies for {}/{}".format(
user_id, image_id
)
)
try:
vulns = rescan_image(img, db_session=db)
db.commit()
except Exception as e:
log.exception(
"Error refreshing cve matches for image {}/{}".format(
user_id, image_id
)
)
db.rollback()
abort(
Response(
"Error refreshing vulnerability listing for image.", 500
)
)
db = get_session()
db.refresh(img)
vulns = img.vulnerabilities()
# Has vulnerabilities?
warns = []
if not vulns:
vulns = []
ns = DistroNamespace.for_obj(img)
if not have_vulnerabilities_for(ns):
warns = [
"No vulnerability data available for image distro: {}".format(
ns.namespace_name
)
]
rows = []
for vuln in vulns:
# Skip the vulnerability if the vendor_only flag is set to True and the issue won't be addressed by the vendor
if vendor_only and vuln.fix_has_no_advisory():
continue
cves = ""
if vuln.vulnerability.additional_metadata:
cves = " ".join(vuln.vulnerability.additional_metadata.get("cves", []))
rows.append(
[
vuln.vulnerability_id,
vuln.vulnerability.severity,
1,
vuln.pkg_name + "-" + vuln.package.fullversion,
str(vuln.fixed_in()),
vuln.pkg_image_id,
"None", # Always empty this for now
vuln.vulnerability.link,
vuln.pkg_type,
"vulnerabilities",
vuln.vulnerability.namespace_name,
vuln.pkg_name,
vuln.package.fullversion,
cves,
]
)
vuln_listing = {
"multi": {
"url_column_index": 7,
"result": {
"header": TABLE_STYLE_HEADER_LIST,
"rowcount": len(rows),
"colcount": len(TABLE_STYLE_HEADER_LIST),
"rows": rows,
},
"warns": warns,
}
}
cpe_vuln_listing = []
try:
all_cpe_matches = img.cpe_vulnerabilities()
if not all_cpe_matches:
all_cpe_matches = []
cpe_hashes = {}
for image_cpe, vulnerability_cpe in all_cpe_matches:
cpe_vuln_el = {
"vulnerability_id": vulnerability_cpe.vulnerability_id,
"severity": vulnerability_cpe.severity,
"link": vulnerability_cpe.link,
"pkg_type": image_cpe.pkg_type,
"pkg_path": image_cpe.pkg_path,
"name": image_cpe.name,
"version": image_cpe.version,
"cpe": image_cpe.get_cpestring(),
"feed_name": vulnerability_cpe.feed_name,
"feed_namespace": vulnerability_cpe.namespace_name,
}
cpe_hash = hashlib.sha256(
utils.ensure_bytes(json.dumps(cpe_vuln_el))
).hexdigest()
if not cpe_hashes.get(cpe_hash, False):
cpe_vuln_listing.append(cpe_vuln_el)
cpe_hashes[cpe_hash] = True
except Exception as err:
log.warn("could not fetch CPE matches - exception: " + str(err))
report = LegacyVulnerabilityReport.from_dict(vuln_listing)
resp = ImageVulnerabilityListing(
user_id=user_id,
image_id=image_id,
legacy_report=report,
cpe_report=cpe_vuln_listing,
)
return resp.to_dict()
except HTTPException:
db.rollback()
raise
except Exception as e:
log.exception(
"Error checking image {}, {} for vulnerabiltiies. Rolling back".format(
user_id, image_id
)
)
db.rollback()
abort(500)
finally:
db.close()
|
https://github.com/anchore/anchore-engine/issues/124
|
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gate.py", line 234, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] self.evaluate(image_obj, context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gates/image_metadata.py", line 59, in evaluate
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] rval = type(img_val)(rval)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] ValueError: invalid literal for int() with base 10: '4gb'
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 453, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] self.configured_trigger.execute(image_obj, exec_context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gate.py", line 237, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] raise TriggerEvaluationError(trigger=self, message='Error executing gate {} trigger {} with params: {}. Msg: {}'.format(self.gate_cls.__gate_name__, self.__trigger_name__, self.eval_params, e.message))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] AttributeError: 'ValueError' object has no attribute 'message'
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/api/controllers/synchronous_operations.py", line 481, in check_user_image_inline
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] eval_result = executable_bundle.execute(img_obj, tag, ExecutionContext(db_session=db, configuration={}))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 1260, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] bundle_exec = self._process_mapping_result(bundle_exec, image_object, tag, context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 1207, in _process_mapping_result
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] errors, policy_decision = evaluated_policy.execute(image_obj=image_object, context=context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 596, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] errs, matches = rule.execute(image_obj=image_obj, exec_context=exec_context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 458, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] raise TriggerEvaluationError(trigger=self.configured_trigger, message='Could not evaluate trigger due to error in evaluation execution')
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] anchore_engine.services.policy_engine.engine.policy.exceptions.TriggerEvaluationError: TriggerEvaluationError: severity:error message:Trigger evaluation failed for gate metadata and trigger attribute, with parameters: ([]) due to: Could not evaluate trigger due to error in evaluation execution
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/apis/authorization.py", line 339, in inner_wrapper
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] return f(*args, **kwargs)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/api/controllers/synchronous_operations.py", line 484, in check_user_image_inline
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] abort(Response(response='Cannot execute given policy against the image due to errors executing the policy bundle: {}'.format(e.message), status=500))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/werkzeug/exceptions.py", line 707, in abort
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] return _aborter(status, *args, **kwargs)
|
ValueError
|
def __init__(self, parent, policy_json=None):
super(ExecutablePolicyRule, self).__init__(parent, policy_json)
# Configure the trigger instance
try:
self.gate_cls = Gate.get_gate_by_name(self.gate_name)
except KeyError:
# Gate not found
self.error_exc = GateNotFoundError(
gate=self.gate_name,
valid_gates=Gate.registered_gate_names(),
rule_id=self.rule_id,
)
self.configured_trigger = None
raise self.error_exc
try:
selected_trigger_cls = self.gate_cls.get_trigger_named(
self.trigger_name.lower()
)
except KeyError:
self.error_exc = TriggerNotFoundError(
valid_triggers=self.gate_cls.trigger_names(),
trigger=self.trigger_name,
gate=self.gate_name,
rule_id=self.rule_id,
)
self.configured_trigger = None
raise self.error_exc
try:
try:
self.configured_trigger = selected_trigger_cls(
parent_gate_cls=self.gate_cls,
rule_id=self.rule_id,
**self.trigger_params,
)
except (
TriggerNotFoundError,
InvalidParameterError,
ParameterValueInvalidError,
) as e:
# Error finding or initializing the trigger
self.error_exc = e
self.configured_trigger = None
if hasattr(e, "gate") and e.gate is None:
e.gate = self.gate_name
if hasattr(e, "trigger") and e.trigger is None:
e.trigger = self.trigger_name
if hasattr(e, "rule_id") and e.rule_id is None:
e.rule_id = self.rule_id
raise e
except PolicyError:
raise # To filter out already-handled errors
except Exception as e:
raise ValidationError.caused_by(e)
|
def __init__(self, parent, policy_json=None):
super(ExecutablePolicyRule, self).__init__(parent, policy_json)
# Configure the trigger instance
try:
self.gate_cls = Gate.get_gate_by_name(self.gate_name)
except KeyError:
# Gate not found
self.error_exc = GateNotFoundError(
gate=self.gate_name,
valid_gates=Gate.registered_gate_names(),
rule_id=self.rule_id,
)
self.configured_trigger = None
raise self.error_exc
try:
selected_trigger_cls = self.gate_cls.get_trigger_named(
self.trigger_name.lower()
)
except KeyError:
self.error_exc = TriggerNotFoundError(
valid_triggers=self.gate_cls.trigger_names(),
trigger=self.trigger_name,
gate=self.gate_name,
rule_id=self.rule_id,
)
self.configured_trigger = None
raise self.error_exc
try:
try:
self.configured_trigger = selected_trigger_cls(
parent_gate_cls=self.gate_cls,
rule_id=self.rule_id,
**self.trigger_params,
)
except [
TriggerNotFoundError,
InvalidParameterError,
ParameterValueInvalidError,
] as e:
# Error finding or initializing the trigger
log.exception("Policy rule execution exception: {}".format(e))
self.error_exc = e
self.configured_trigger = None
if hasattr(e, "gate") and e.gate is None:
e.gate = self.gate_name
if hasattr(e, "trigger") and e.trigger is None:
e.trigger = self.trigger_name
if hasattr(e, "rule_id") and e.rule_id is None:
e.rule_id = self.rule_id
raise e
except PolicyError:
raise # To filter out already-handled errors
except Exception as e:
raise ValidationError.caused_by(e)
|
https://github.com/anchore/anchore-engine/issues/124
|
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gate.py", line 234, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] self.evaluate(image_obj, context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gates/image_metadata.py", line 59, in evaluate
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] rval = type(img_val)(rval)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] ValueError: invalid literal for int() with base 10: '4gb'
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 453, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] self.configured_trigger.execute(image_obj, exec_context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gate.py", line 237, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] raise TriggerEvaluationError(trigger=self, message='Error executing gate {} trigger {} with params: {}. Msg: {}'.format(self.gate_cls.__gate_name__, self.__trigger_name__, self.eval_params, e.message))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] AttributeError: 'ValueError' object has no attribute 'message'
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/api/controllers/synchronous_operations.py", line 481, in check_user_image_inline
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] eval_result = executable_bundle.execute(img_obj, tag, ExecutionContext(db_session=db, configuration={}))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 1260, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] bundle_exec = self._process_mapping_result(bundle_exec, image_object, tag, context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 1207, in _process_mapping_result
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] errors, policy_decision = evaluated_policy.execute(image_obj=image_object, context=context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 596, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] errs, matches = rule.execute(image_obj=image_obj, exec_context=exec_context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 458, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] raise TriggerEvaluationError(trigger=self.configured_trigger, message='Could not evaluate trigger due to error in evaluation execution')
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] anchore_engine.services.policy_engine.engine.policy.exceptions.TriggerEvaluationError: TriggerEvaluationError: severity:error message:Trigger evaluation failed for gate metadata and trigger attribute, with parameters: ([]) due to: Could not evaluate trigger due to error in evaluation execution
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/apis/authorization.py", line 339, in inner_wrapper
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] return f(*args, **kwargs)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/api/controllers/synchronous_operations.py", line 484, in check_user_image_inline
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] abort(Response(response='Cannot execute given policy against the image due to errors executing the policy bundle: {}'.format(e.message), status=500))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/werkzeug/exceptions.py", line 707, in abort
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] return _aborter(status, *args, **kwargs)
|
ValueError
|
def execute(self, image_obj, exec_context):
"""
Execute the trigger specified in the rule with the image and gate (for prepared context) and exec_context)
:param image_obj: The image to execute against
:param exec_context: The prepared execution context from the gate init
:return: a tuple of a list of errors and a list of PolicyRuleDecisions, one for each fired trigger match produced by the trigger execution
"""
matches = None
try:
if not self.configured_trigger:
log.error(
"No configured trigger to execute for gate {} and trigger: {}. Returning".format(
self.gate_name, self.trigger_name
)
)
raise TriggerNotFoundError(
trigger_name=self.trigger_name, gate_name=self.gate_name
)
if self.gate_cls.__lifecycle_state__ == LifecycleStates.eol:
self.errors.append(
EndOfLifedError(
gate_name=self.gate_name, superceded=self.gate_cls.__superceded_by__
)
)
elif self.gate_cls.__lifecycle_state__ == LifecycleStates.deprecated:
self.errors.append(
DeprecationWarning(
gate_name=self.gate_name, superceded=self.gate_cls.__superceded_by__
)
)
elif self.configured_trigger.__lifecycle_state__ == LifecycleStates.eol:
self.errors.append(
EndOfLifedError(
gate_name=self.gate_name,
trigger_name=self.trigger_name,
superceded=self.configured_trigger.__superceded_by__,
)
)
elif self.configured_trigger.__lifecycle_state__ == LifecycleStates.deprecated:
self.errors.append(
DeprecationWarning(
gate_name=self.gate_name,
trigger_name=self.trigger_name,
superceded=self.configured_trigger.__superceded_by__,
)
)
try:
self.configured_trigger.execute(image_obj, exec_context)
except TriggerEvaluationError:
raise
except Exception as e:
log.exception("Unmapped exception caught during trigger evaluation")
raise TriggerEvaluationError(
trigger=self.configured_trigger, message="Could not evaluate trigger"
)
matches = self.configured_trigger.fired
decisions = []
# Try all rules and record all decisions and errors so multiple errors can be reported if present, not just the first encountered
for match in matches:
try:
decisions.append(
PolicyRuleDecision(trigger_match=match, policy_rule=self)
)
except TriggerEvaluationError as e:
log.exception("Policy rule decision mapping exception: {}".format(e))
self.errors.append(str(e))
return self.errors, decisions
except Exception as e:
log.exception(
"Error executing trigger {} on image {}".format(
self.trigger_name, image_obj.id
)
)
raise
|
def execute(self, image_obj, exec_context):
"""
Execute the trigger specified in the rule with the image and gate (for prepared context) and exec_context)
:param image_obj: The image to execute against
:param exec_context: The prepared execution context from the gate init
:return: a tuple of a list of errors and a list of PolicyRuleDecisions, one for each fired trigger match produced by the trigger execution
"""
matches = None
try:
if not self.configured_trigger:
log.error(
"No configured trigger to execute for gate {} and trigger: {}. Returning".format(
self.gate_name, self.trigger_name
)
)
raise TriggerNotFoundError(
trigger_name=self.trigger_name, gate_name=self.gate_name
)
if self.gate_cls.__lifecycle_state__ == LifecycleStates.eol:
self.errors.append(
EndOfLifedError(
gate_name=self.gate_name, superceded=self.gate_cls.__superceded_by__
)
)
elif self.gate_cls.__lifecycle_state__ == LifecycleStates.deprecated:
self.errors.append(
DeprecationWarning(
gate_name=self.gate_name, superceded=self.gate_cls.__superceded_by__
)
)
elif self.configured_trigger.__lifecycle_state__ == LifecycleStates.eol:
self.errors.append(
EndOfLifedError(
gate_name=self.gate_name,
trigger_name=self.trigger_name,
superceded=self.configured_trigger.__superceded_by__,
)
)
elif self.configured_trigger.__lifecycle_state__ == LifecycleStates.deprecated:
self.errors.append(
DeprecationWarning(
gate_name=self.gate_name,
trigger_name=self.trigger_name,
superceded=self.configured_trigger.__superceded_by__,
)
)
try:
self.configured_trigger.execute(image_obj, exec_context)
except TriggerEvaluationError:
raise
except Exception as e:
log.exception("Unmapped exception caught during trigger evaluation")
raise TriggerEvaluationError(
trigger=self.configured_trigger,
message="Could not evaluate trigger due to error in evaluation execution",
)
matches = self.configured_trigger.fired
decisions = []
# Try all rules and record all decisions and errors so multiple errors can be reported if present, not just the first encountered
for match in matches:
try:
decisions.append(
PolicyRuleDecision(trigger_match=match, policy_rule=self)
)
except TriggerEvaluationError as e:
log.exception("Policy rule decision mapping exception: {}".format(e))
self.errors.append(str(e))
return self.errors, decisions
except Exception as e:
log.exception(
"Error executing trigger {} on image {}".format(
self.trigger_name, image_obj.id
)
)
raise
|
https://github.com/anchore/anchore-engine/issues/124
|
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gate.py", line 234, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] self.evaluate(image_obj, context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gates/image_metadata.py", line 59, in evaluate
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] rval = type(img_val)(rval)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] ValueError: invalid literal for int() with base 10: '4gb'
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 453, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] self.configured_trigger.execute(image_obj, exec_context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gate.py", line 237, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] raise TriggerEvaluationError(trigger=self, message='Error executing gate {} trigger {} with params: {}. Msg: {}'.format(self.gate_cls.__gate_name__, self.__trigger_name__, self.eval_params, e.message))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] AttributeError: 'ValueError' object has no attribute 'message'
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/api/controllers/synchronous_operations.py", line 481, in check_user_image_inline
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] eval_result = executable_bundle.execute(img_obj, tag, ExecutionContext(db_session=db, configuration={}))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 1260, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] bundle_exec = self._process_mapping_result(bundle_exec, image_object, tag, context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 1207, in _process_mapping_result
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] errors, policy_decision = evaluated_policy.execute(image_obj=image_object, context=context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 596, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] errs, matches = rule.execute(image_obj=image_obj, exec_context=exec_context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 458, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] raise TriggerEvaluationError(trigger=self.configured_trigger, message='Could not evaluate trigger due to error in evaluation execution')
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] anchore_engine.services.policy_engine.engine.policy.exceptions.TriggerEvaluationError: TriggerEvaluationError: severity:error message:Trigger evaluation failed for gate metadata and trigger attribute, with parameters: ([]) due to: Could not evaluate trigger due to error in evaluation execution
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/apis/authorization.py", line 339, in inner_wrapper
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] return f(*args, **kwargs)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/api/controllers/synchronous_operations.py", line 484, in check_user_image_inline
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] abort(Response(response='Cannot execute given policy against the image due to errors executing the policy bundle: {}'.format(e.message), status=500))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/werkzeug/exceptions.py", line 707, in abort
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] return _aborter(status, *args, **kwargs)
|
ValueError
|
def build_empty_error_execution(image_obj, tag, bundle, errors=None, warnings=None):
"""
Creates an empty BundleExecution suitable for use in error cases where the bundle was not actually run but this object
is needed to populate errors and warnings for return.
:param image_obj:
:param tag:
:param bundle:
:return: BundleExecution object with bundle, image, and tag set and a STOP final action.
"""
b = BundleExecution(bundle=bundle, image_id=image_obj.id, tag=tag)
b.bundle_decision = BundleDecision(policy_decisions=[FailurePolicyDecision()])
b.errors = errors
b.warnings = warnings
return b
|
def build_empty_error_execution(image_obj, tag, bundle, errors=None, warnings=None):
"""
Creates an empty BundleExecution suitable for use in error cases where the bundle was not actually run but this object
is needed to populate errors and warnings for return.
:param image_obj:
:param tag:
:param bundle:
:return: BundleExecution object with bundle, image, and tag set and a STOP final action.
"""
b = BundleExecution(bundle=bundle, image_id=image_obj.id, tag=tag)
b.bundle_decision = BundleDecision(policy_decision=FailurePolicyDecision())
b.errors = errors
b.warnings = warnings
return b
|
https://github.com/anchore/anchore-engine/issues/124
|
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gate.py", line 234, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] self.evaluate(image_obj, context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gates/image_metadata.py", line 59, in evaluate
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] rval = type(img_val)(rval)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] ValueError: invalid literal for int() with base 10: '4gb'
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 453, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] self.configured_trigger.execute(image_obj, exec_context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gate.py", line 237, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] raise TriggerEvaluationError(trigger=self, message='Error executing gate {} trigger {} with params: {}. Msg: {}'.format(self.gate_cls.__gate_name__, self.__trigger_name__, self.eval_params, e.message))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] AttributeError: 'ValueError' object has no attribute 'message'
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/api/controllers/synchronous_operations.py", line 481, in check_user_image_inline
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] eval_result = executable_bundle.execute(img_obj, tag, ExecutionContext(db_session=db, configuration={}))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 1260, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] bundle_exec = self._process_mapping_result(bundle_exec, image_object, tag, context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 1207, in _process_mapping_result
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] errors, policy_decision = evaluated_policy.execute(image_obj=image_object, context=context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 596, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] errs, matches = rule.execute(image_obj=image_obj, exec_context=exec_context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 458, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] raise TriggerEvaluationError(trigger=self.configured_trigger, message='Could not evaluate trigger due to error in evaluation execution')
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] anchore_engine.services.policy_engine.engine.policy.exceptions.TriggerEvaluationError: TriggerEvaluationError: severity:error message:Trigger evaluation failed for gate metadata and trigger attribute, with parameters: ([]) due to: Could not evaluate trigger due to error in evaluation execution
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/apis/authorization.py", line 339, in inner_wrapper
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] return f(*args, **kwargs)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/api/controllers/synchronous_operations.py", line 484, in check_user_image_inline
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] abort(Response(response='Cannot execute given policy against the image due to errors executing the policy bundle: {}'.format(e.message), status=500))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/werkzeug/exceptions.py", line 707, in abort
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] return _aborter(status, *args, **kwargs)
|
ValueError
|
def __init__(self, trigger, message=None):
params = {x.name: x.value() for x in trigger.parameters().values()}
trigger_name = trigger.__trigger_name__ if trigger else "unset"
gate_name = (
trigger.gate_cls.__gate_name__ if trigger and trigger.gate_cls else "unset"
)
msg = "Trigger evaluation failed for gate {} and trigger {}, with parameters: ({}) due to: {}".format(
gate_name, trigger_name, params, message
)
super().__init__(msg)
self.trigger = trigger
self.gate = trigger.gate_cls
|
def __init__(self, trigger, message=None):
params = trigger.eval_params if trigger and trigger.eval_params else []
trigger_name = trigger.__trigger_name__ if trigger else "unset"
gate_name = (
trigger.gate_cls.__gate_name__ if trigger and trigger.gate_cls else "unset"
)
msg = "Trigger evaluation failed for gate {} and trigger {}, with parameters: ({}) due to: {}".format(
gate_name, trigger_name, params, message
)
super().__init__(msg)
self.trigger = trigger
self.gate = trigger.gate_cls
|
https://github.com/anchore/anchore-engine/issues/124
|
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gate.py", line 234, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] self.evaluate(image_obj, context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gates/image_metadata.py", line 59, in evaluate
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] rval = type(img_val)(rval)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] ValueError: invalid literal for int() with base 10: '4gb'
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 453, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] self.configured_trigger.execute(image_obj, exec_context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gate.py", line 237, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] raise TriggerEvaluationError(trigger=self, message='Error executing gate {} trigger {} with params: {}. Msg: {}'.format(self.gate_cls.__gate_name__, self.__trigger_name__, self.eval_params, e.message))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] AttributeError: 'ValueError' object has no attribute 'message'
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/api/controllers/synchronous_operations.py", line 481, in check_user_image_inline
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] eval_result = executable_bundle.execute(img_obj, tag, ExecutionContext(db_session=db, configuration={}))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 1260, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] bundle_exec = self._process_mapping_result(bundle_exec, image_object, tag, context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 1207, in _process_mapping_result
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] errors, policy_decision = evaluated_policy.execute(image_obj=image_object, context=context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 596, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] errs, matches = rule.execute(image_obj=image_obj, exec_context=exec_context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 458, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] raise TriggerEvaluationError(trigger=self.configured_trigger, message='Could not evaluate trigger due to error in evaluation execution')
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] anchore_engine.services.policy_engine.engine.policy.exceptions.TriggerEvaluationError: TriggerEvaluationError: severity:error message:Trigger evaluation failed for gate metadata and trigger attribute, with parameters: ([]) due to: Could not evaluate trigger due to error in evaluation execution
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/apis/authorization.py", line 339, in inner_wrapper
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] return f(*args, **kwargs)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/api/controllers/synchronous_operations.py", line 484, in check_user_image_inline
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] abort(Response(response='Cannot execute given policy against the image due to errors executing the policy bundle: {}'.format(e.message), status=500))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/werkzeug/exceptions.py", line 707, in abort
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] return _aborter(status, *args, **kwargs)
|
ValueError
|
def __init__(self, parent_gate_cls, rule_id=None, **kwargs):
"""
Instantiate the trigger with a specific set of parameters. Does not evaluate the trigger, just configures
it for execution.
"""
self.gate_cls = parent_gate_cls
self.msg = None
self._fired_instances = []
self.rule_id = rule_id
# Short circuit if gate is eol or trigger is eol
if (
self.gate_cls.__lifecycle_state__ == LifecycleStates.eol
or self.__lifecycle_state__ == LifecycleStates.eol
):
return
# Setup the parameters, try setting each. If not provided, set to None to handle validation path for required params
invalid_params = []
# The list of class vars that are parameters
params = self.__class__._parameters()
param_name_map = {}
if kwargs is None:
kwargs = {}
# Find all class objects that are params
for attr_name, param_obj in list(params.items()):
for a in param_obj.aliases:
param_name_map[a] = param_obj.name
param_name_map[param_obj.name] = param_obj.name
try:
setattr(self, attr_name, copy.deepcopy(param_obj))
param_value = kwargs.get(param_obj.name, None)
if param_value is None:
# Try aliases
for alias in param_obj.aliases:
param_value = kwargs.get(alias, None)
if param_value:
break
getattr(self, attr_name).set_value(param_value)
except ValidationError as e:
invalid_params.append(
ParameterValueInvalidError(
validation_error=e,
gate=self.gate_cls.__gate_name__,
trigger=self.__trigger_name__,
rule_id=self.rule_id,
)
)
# One last pass to catch any dependent validations after all values are set, to eliminate issues due to eval order
for param_obj in filter(
lambda x: isinstance(x.validator, LinkedValidator),
list(self.parameters().values()),
):
# Update the discriminator link to the object member instead of the class member
param_obj.validator.inject_discriminator(
self.parameters()[param_obj.validator.discriminator_name].value()
)
try:
param_obj.validator.validate(param_obj._param_value)
except ValidationError as e:
invalid_params.append(
ParameterValueInvalidError(
validation_error=e,
gate=self.gate_cls.__gate_name__,
trigger=self.__trigger_name__,
rule_id=self.rule_id,
)
)
# Then, check for any parameters provided that are not defined in the trigger.
if kwargs:
given_param_names = set([param_name_map.get(x) for x in list(kwargs.keys())])
for i in given_param_names.difference(
set([x.name for x in list(params.values())])
):
# Need to aggregate and return all invalid if there is more than one
invalid_params.append(
InvalidParameterError(
i,
list(params.keys()),
trigger=self.__trigger_name__,
gate=self.gate_cls.__gate_name__,
)
)
if invalid_params:
raise PolicyRuleValidationErrorCollection(
invalid_params,
trigger=self.__trigger_name__,
gate=self.gate_cls.__gate_name__,
)
|
def __init__(self, parent_gate_cls, rule_id=None, **kwargs):
"""
Instantiate the trigger with a specific set of parameters. Does not evaluate the trigger, just configures
it for execution.
"""
self.gate_cls = parent_gate_cls
self.msg = None
self.eval_params = {}
self._fired_instances = []
self.rule_id = rule_id
# Short circuit if gate is eol or trigger is eol
if (
self.gate_cls.__lifecycle_state__ == LifecycleStates.eol
or self.__lifecycle_state__ == LifecycleStates.eol
):
return
# Setup the parameters, try setting each. If not provided, set to None to handle validation path for required params
invalid_params = []
# The list of class vars that are parameters
params = self.__class__._parameters()
param_name_map = {}
if kwargs is None:
kwargs = {}
# Find all class objects that are params
for attr_name, param_obj in list(params.items()):
for a in param_obj.aliases:
param_name_map[a] = param_obj.name
param_name_map[param_obj.name] = param_obj.name
try:
setattr(self, attr_name, copy.deepcopy(param_obj))
param_value = kwargs.get(param_obj.name, None)
if param_value is None:
# Try aliases
for alias in param_obj.aliases:
param_value = kwargs.get(alias, None)
if param_value:
break
getattr(self, attr_name).set_value(param_value)
except ValidationError as e:
invalid_params.append(
ParameterValueInvalidError(
validation_error=e,
gate=self.gate_cls.__gate_name__,
trigger=self.__trigger_name__,
rule_id=self.rule_id,
)
)
# Then, check for any parameters provided that are not defined in the trigger.
if kwargs:
given_param_names = set([param_name_map.get(x) for x in list(kwargs.keys())])
for i in given_param_names.difference(
set([x.name for x in list(params.values())])
):
# Need to aggregate and return all invalid if there is more than one
invalid_params.append(
InvalidParameterError(
i,
list(params.keys()),
trigger=self.__trigger_name__,
gate=self.gate_cls.__gate_name__,
)
)
if invalid_params:
raise PolicyRuleValidationErrorCollection(
invalid_params,
trigger=self.__trigger_name__,
gate=self.gate_cls.__gate_name__,
)
|
https://github.com/anchore/anchore-engine/issues/124
|
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gate.py", line 234, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] self.evaluate(image_obj, context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gates/image_metadata.py", line 59, in evaluate
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] rval = type(img_val)(rval)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] ValueError: invalid literal for int() with base 10: '4gb'
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 453, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] self.configured_trigger.execute(image_obj, exec_context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gate.py", line 237, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] raise TriggerEvaluationError(trigger=self, message='Error executing gate {} trigger {} with params: {}. Msg: {}'.format(self.gate_cls.__gate_name__, self.__trigger_name__, self.eval_params, e.message))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] AttributeError: 'ValueError' object has no attribute 'message'
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/api/controllers/synchronous_operations.py", line 481, in check_user_image_inline
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] eval_result = executable_bundle.execute(img_obj, tag, ExecutionContext(db_session=db, configuration={}))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 1260, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] bundle_exec = self._process_mapping_result(bundle_exec, image_object, tag, context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 1207, in _process_mapping_result
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] errors, policy_decision = evaluated_policy.execute(image_obj=image_object, context=context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 596, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] errs, matches = rule.execute(image_obj=image_obj, exec_context=exec_context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 458, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] raise TriggerEvaluationError(trigger=self.configured_trigger, message='Could not evaluate trigger due to error in evaluation execution')
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] anchore_engine.services.policy_engine.engine.policy.exceptions.TriggerEvaluationError: TriggerEvaluationError: severity:error message:Trigger evaluation failed for gate metadata and trigger attribute, with parameters: ([]) due to: Could not evaluate trigger due to error in evaluation execution
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/apis/authorization.py", line 339, in inner_wrapper
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] return f(*args, **kwargs)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/api/controllers/synchronous_operations.py", line 484, in check_user_image_inline
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] abort(Response(response='Cannot execute given policy against the image due to errors executing the policy bundle: {}'.format(e.message), status=500))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/werkzeug/exceptions.py", line 707, in abort
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] return _aborter(status, *args, **kwargs)
|
ValueError
|
def execute(self, image_obj, context):
"""
Main entry point for the trigger execution. Will clear any previously saved exec state and call the evaluate() function.
:param image_obj:
:param context:
:return:
"""
self.reset()
if (
self.gate_cls.__lifecycle_state__ != LifecycleStates.eol
and self.__lifecycle_state__ != LifecycleStates.eol
):
if image_obj is None:
raise TriggerEvaluationError(
trigger=self, message="No image provided to evaluate against"
)
try:
self.evaluate(image_obj, context)
except Exception as e:
logger.exception("Error evaluating trigger. Aborting trigger execution")
raise TriggerEvaluationError(trigger=self, message=str(e))
return True
|
def execute(self, image_obj, context):
"""
Main entry point for the trigger execution. Will clear any previously saved exec state and call the evaluate() function.
:param image_obj:
:param context:
:return:
"""
self.reset()
if (
self.gate_cls.__lifecycle_state__ != LifecycleStates.eol
and self.__lifecycle_state__ != LifecycleStates.eol
):
if image_obj is None:
raise TriggerEvaluationError(
trigger=self, message="No image provided to evaluate against"
)
try:
self.evaluate(image_obj, context)
except Exception as e:
logger.exception("Error evaluating trigger. Aborting trigger execution")
raise TriggerEvaluationError(
trigger=self,
message="Error executing gate {} trigger {} with params: {}. Msg: {}".format(
self.gate_cls.__gate_name__,
self.__trigger_name__,
self.eval_params,
e.message,
),
)
return True
|
https://github.com/anchore/anchore-engine/issues/124
|
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gate.py", line 234, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] self.evaluate(image_obj, context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gates/image_metadata.py", line 59, in evaluate
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] rval = type(img_val)(rval)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] ValueError: invalid literal for int() with base 10: '4gb'
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 453, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] self.configured_trigger.execute(image_obj, exec_context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gate.py", line 237, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] raise TriggerEvaluationError(trigger=self, message='Error executing gate {} trigger {} with params: {}. Msg: {}'.format(self.gate_cls.__gate_name__, self.__trigger_name__, self.eval_params, e.message))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] AttributeError: 'ValueError' object has no attribute 'message'
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/api/controllers/synchronous_operations.py", line 481, in check_user_image_inline
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] eval_result = executable_bundle.execute(img_obj, tag, ExecutionContext(db_session=db, configuration={}))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 1260, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] bundle_exec = self._process_mapping_result(bundle_exec, image_object, tag, context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 1207, in _process_mapping_result
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] errors, policy_decision = evaluated_policy.execute(image_obj=image_object, context=context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 596, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] errs, matches = rule.execute(image_obj=image_obj, exec_context=exec_context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 458, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] raise TriggerEvaluationError(trigger=self.configured_trigger, message='Could not evaluate trigger due to error in evaluation execution')
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] anchore_engine.services.policy_engine.engine.policy.exceptions.TriggerEvaluationError: TriggerEvaluationError: severity:error message:Trigger evaluation failed for gate metadata and trigger attribute, with parameters: ([]) due to: Could not evaluate trigger due to error in evaluation execution
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/apis/authorization.py", line 339, in inner_wrapper
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] return f(*args, **kwargs)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/api/controllers/synchronous_operations.py", line 484, in check_user_image_inline
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] abort(Response(response='Cannot execute given policy against the image due to errors executing the policy bundle: {}'.format(e.message), status=500))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/werkzeug/exceptions.py", line 707, in abort
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] return _aborter(status, *args, **kwargs)
|
ValueError
|
def evaluate(self, image_obj, context):
attr = self.attribute.value()
check = self.check.value()
rval = self.check_value.value()
if not attr or not check:
return
op = self.__ops__.get(check)
if op is None or op.requires_rvalue and not rval:
# Raise exception or fall thru
return
img_val = self.__valid_attributes__[attr][0](image_obj)
# Make consistent types (specifically for int/float/str)
if type(img_val) in [int, float, str]:
if attr == "size":
rval = convert_bytes_size(rval)
else:
rval = type(img_val)(rval)
if op.eval_function(img_val, rval):
self._fire(
msg="Attribute check for attribute: '{}' check: '{}' check_value: '{}' matched image value: '{}'".format(
attr, check, (str(rval) if rval is not None else ""), img_val
)
)
|
def evaluate(self, image_obj, context):
attr = self.attribute.value()
check = self.check.value()
rval = self.check_value.value()
if not attr or not check:
return
op = self.__ops__.get(check)
if op is None or op.requires_rvalue and not rval:
# Raise exception or fall thru
return
img_val = self.__valid_attributes__[attr](image_obj)
# Make consistent types (specifically for int/float/str)
if type(img_val) in [str, int, float, str]:
rval = type(img_val)(rval)
if op.eval_function(img_val, rval):
self._fire(
msg="Attribute check for attribute: '{}' check: '{}' check_value: '{}' matched image value: '{}'".format(
attr, check, (str(rval) if rval is not None else ""), img_val
)
)
|
https://github.com/anchore/anchore-engine/issues/124
|
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gate.py", line 234, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] self.evaluate(image_obj, context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gates/image_metadata.py", line 59, in evaluate
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] rval = type(img_val)(rval)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] ValueError: invalid literal for int() with base 10: '4gb'
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 453, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] self.configured_trigger.execute(image_obj, exec_context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/gate.py", line 237, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] raise TriggerEvaluationError(trigger=self, message='Error executing gate {} trigger {} with params: {}. Msg: {}'.format(self.gate_cls.__gate_name__, self.__trigger_name__, self.eval_params, e.message))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] AttributeError: 'ValueError' object has no attribute 'message'
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/api/controllers/synchronous_operations.py", line 481, in check_user_image_inline
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] eval_result = executable_bundle.execute(img_obj, tag, ExecutionContext(db_session=db, configuration={}))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 1260, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] bundle_exec = self._process_mapping_result(bundle_exec, image_object, tag, context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 1207, in _process_mapping_result
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] errors, policy_decision = evaluated_policy.execute(image_obj=image_object, context=context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 596, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] errs, matches = rule.execute(image_obj=image_obj, exec_context=exec_context)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/engine/policy/bundles.py", line 458, in execute
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] raise TriggerEvaluationError(trigger=self.configured_trigger, message='Could not evaluate trigger due to error in evaluation execution')
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] anchore_engine.services.policy_engine.engine.policy.exceptions.TriggerEvaluationError: TriggerEvaluationError: severity:error message:Trigger evaluation failed for gate metadata and trigger attribute, with parameters: ([]) due to: Could not evaluate trigger due to error in evaluation execution
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] During handling of the above exception, another exception occurred:
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-]
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] Traceback (most recent call last):
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/apis/authorization.py", line 339, in inner_wrapper
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] return f(*args, **kwargs)
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/anchore_engine/services/policy_engine/api/controllers/synchronous_operations.py", line 484, in check_user_image_inline
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] abort(Response(response='Cannot execute given policy against the image due to errors executing the policy bundle: {}'.format(e.message), status=500))
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] File "/usr/local/lib/python3.6/dist-packages/werkzeug/exceptions.py", line 707, in abort
engine-policy-engine_1 | [service:policy-engine] 2019-01-11 05:49:12+0000 [-] return _aborter(status, *args, **kwargs)
|
ValueError
|
def register_predictors(self, model_data_arr, setup=True):
it = self._get_integrations()
for integration in it:
register = True
if setup:
register = self._setup_integration(integration)
if register:
if integration.check_connection():
integration.register_predictors(model_data_arr)
else:
logger.warning(
f"There is no connection to {integration.name}. predictor wouldn't be registred."
)
integration = [integration]
|
def register_predictors(self, model_data_arr, setup=True):
it = self._get_integrations()
for integration in it:
register = True
if setup:
register = self._setup_integration(integration)
if register:
integration.register_predictors(model_data_arr)
integration = [integration]
|
https://github.com/mindsdb/mindsdb/issues/1007
|
[2020-12-14 10:40:17,942] ERROR in app: Exception on /api/predictors/home_initial [DELETE]
Traceback (most recent call last):
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py", line 233, in _open_connection
self._cmysql.connect(**cnx_kwargs)
_mysql_connector.MySQLInterfaceError: Access denied for user 'roote'@'localhost'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/api/http/namespaces/predictor.py", line 116, in delete
ca.mindsdb_native.delete_model(name)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/interfaces/native/mindsdb.py", line 97, in delete_model
self.dbw.unregister_predictor(name)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/interfaces/database/database.py", line 59, in unregister_predictor
integration.unregister_predictor(name)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/integrations/mariadb/mariadb.py", line 131, in unregister_predictor
self._query(q)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/integrations/mariadb/mariadb.py", line 42, in _query
con = mysql.connector.connect(
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/__init__.py", line 270, in connect
return CMySQLConnection(*args, **kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py", line 86, in __init__
self.connect(**kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/abstracts.py", line 985, in connect
self._open_connection()
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py", line 235, in _open_connection
raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno,
mysql.connector.errors.ProgrammingError: 1698 (28000): Access denied for user 'roote'@'localhost'
ERROR:mindsdb.api.http.initialize:Exception on /api/predictors/home_initial [DELETE]
Traceback (most recent call last):
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py", line 233, in _open_connection
self._cmysql.connect(**cnx_kwargs)
_mysql_connector.MySQLInterfaceError: Access denied for user 'roote'@'localhost'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/api/http/namespaces/predictor.py", line 116, in delete
ca.mindsdb_native.delete_model(name)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/interfaces/native/mindsdb.py", line 97, in delete_model
self.dbw.unregister_predictor(name)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/interfaces/database/database.py", line 59, in unregister_predictor
integration.unregister_predictor(name)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/integrations/mariadb/mariadb.py", line 131, in unregister_predictor
self._query(q)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/integrations/mariadb/mariadb.py", line 42, in _query
con = mysql.connector.connect(
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/__init__.py", line 270, in connect
return CMySQLConnection(*args, **kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py", line 86, in __init__
self.connect(**kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/abstracts.py", line 985, in connect
self._open_connection()
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py", line 235, in _open_connection
raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno,
mysql.connector.errors.ProgrammingError: 1698 (28000): Access denied for user 'roote'@'localhost'
|
_mysql_connector.MySQLInterfaceError
|
def unregister_predictor(self, name):
for integration in self._get_integrations():
if integration.check_connection():
integration.unregister_predictor(name)
else:
logger.warning(
f"There is no connection to {integration.name}. predictor wouldn't be unregistred"
)
|
def unregister_predictor(self, name):
for integration in self._get_integrations():
integration.unregister_predictor(name)
|
https://github.com/mindsdb/mindsdb/issues/1007
|
[2020-12-14 10:40:17,942] ERROR in app: Exception on /api/predictors/home_initial [DELETE]
Traceback (most recent call last):
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py", line 233, in _open_connection
self._cmysql.connect(**cnx_kwargs)
_mysql_connector.MySQLInterfaceError: Access denied for user 'roote'@'localhost'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/api/http/namespaces/predictor.py", line 116, in delete
ca.mindsdb_native.delete_model(name)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/interfaces/native/mindsdb.py", line 97, in delete_model
self.dbw.unregister_predictor(name)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/interfaces/database/database.py", line 59, in unregister_predictor
integration.unregister_predictor(name)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/integrations/mariadb/mariadb.py", line 131, in unregister_predictor
self._query(q)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/integrations/mariadb/mariadb.py", line 42, in _query
con = mysql.connector.connect(
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/__init__.py", line 270, in connect
return CMySQLConnection(*args, **kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py", line 86, in __init__
self.connect(**kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/abstracts.py", line 985, in connect
self._open_connection()
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py", line 235, in _open_connection
raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno,
mysql.connector.errors.ProgrammingError: 1698 (28000): Access denied for user 'roote'@'localhost'
ERROR:mindsdb.api.http.initialize:Exception on /api/predictors/home_initial [DELETE]
Traceback (most recent call last):
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py", line 233, in _open_connection
self._cmysql.connect(**cnx_kwargs)
_mysql_connector.MySQLInterfaceError: Access denied for user 'roote'@'localhost'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/api/http/namespaces/predictor.py", line 116, in delete
ca.mindsdb_native.delete_model(name)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/interfaces/native/mindsdb.py", line 97, in delete_model
self.dbw.unregister_predictor(name)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/interfaces/database/database.py", line 59, in unregister_predictor
integration.unregister_predictor(name)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/integrations/mariadb/mariadb.py", line 131, in unregister_predictor
self._query(q)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/integrations/mariadb/mariadb.py", line 42, in _query
con = mysql.connector.connect(
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/__init__.py", line 270, in connect
return CMySQLConnection(*args, **kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py", line 86, in __init__
self.connect(**kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/abstracts.py", line 985, in connect
self._open_connection()
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py", line 235, in _open_connection
raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno,
mysql.connector.errors.ProgrammingError: 1698 (28000): Access denied for user 'roote'@'localhost'
|
_mysql_connector.MySQLInterfaceError
|
def get(self, name):
try:
if is_custom(name):
model = ca.custom_models.get_model_data(name)
else:
model = ca.mindsdb_native.get_model_data(name, native_view=True)
except Exception as e:
abort(404, "")
for k in ["train_end_at", "updated_at", "created_at"]:
if k in model and model[k] is not None:
model[k] = parse_datetime(model[k])
return model
|
def get(self, name):
try:
if is_custom(name):
model = ca.custom_models.get_model_data(name)
else:
model = ca.mindsdb_native.get_model_data(name)
except Exception as e:
abort(404, "")
for k in ["train_end_at", "updated_at", "created_at"]:
if k in model and model[k] is not None:
model[k] = parse_datetime(model[k])
return model
|
https://github.com/mindsdb/mindsdb/issues/928
|
'typing'
ERROR while fetching data for query: SELECT `satisfaction`, `satisfaction_confidence` FROM `mindsdb`.`airline_data` WHERE ((`age` = 30))
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py", line 68, in fetch
self._fetchData()
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py", line 440, in _fetchData
came_from=self.integration
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py", line 220, in select
for f in model['data_analysis_v2']['columns']
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py", line 220, in <dictcomp>
for f in model['data_analysis_v2']['columns']
KeyError: 'typing'
'typing'
|
KeyError
|
def get(self, name):
"""List of predictors colums"""
try:
if is_custom(name):
model = ca.custom_models.get_model_data(name)
else:
model = ca.mindsdb_native.get_model_data(name, native_view=True)
except Exception:
abort(404, "Invalid predictor name")
columns = []
for array, is_target_array in [
(model["data_analysis"]["target_columns_metadata"], True),
(model["data_analysis"]["input_columns_metadata"], False),
]:
for col_data in array:
column = {
"name": col_data["column_name"],
"data_type": col_data["data_type"].lower(),
"is_target_column": is_target_array,
}
if column["data_type"] == "categorical":
column["distribution"] = col_data["data_distribution"][
"data_histogram"
]["x"]
columns.append(column)
return columns, 200
|
def get(self, name):
"""List of predictors colums"""
try:
if is_custom(name):
model = ca.custom_models.get_model_data(name)
else:
model = ca.mindsdb_native.get_model_data(name)
except Exception:
abort(404, "Invalid predictor name")
columns = []
for array, is_target_array in [
(model["data_analysis"]["target_columns_metadata"], True),
(model["data_analysis"]["input_columns_metadata"], False),
]:
for col_data in array:
column = {
"name": col_data["column_name"],
"data_type": col_data["data_type"].lower(),
"is_target_column": is_target_array,
}
if column["data_type"] == "categorical":
column["distribution"] = col_data["data_distribution"][
"data_histogram"
]["x"]
columns.append(column)
return columns, 200
|
https://github.com/mindsdb/mindsdb/issues/928
|
'typing'
ERROR while fetching data for query: SELECT `satisfaction`, `satisfaction_confidence` FROM `mindsdb`.`airline_data` WHERE ((`age` = 30))
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py", line 68, in fetch
self._fetchData()
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py", line 440, in _fetchData
came_from=self.integration
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py", line 220, in select
for f in model['data_analysis_v2']['columns']
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py", line 220, in <dictcomp>
for f in model['data_analysis_v2']['columns']
KeyError: 'typing'
'typing'
|
KeyError
|
def cast_df_columns_types(df, stats):
types_map = {
DATA_TYPES.NUMERIC: {
DATA_SUBTYPES.INT: "int64",
DATA_SUBTYPES.FLOAT: "float64",
DATA_SUBTYPES.BINARY: "bool",
},
DATA_TYPES.DATE: {
DATA_SUBTYPES.DATE: "datetime64", # YYYY-MM-DD
DATA_SUBTYPES.TIMESTAMP: "datetime64", # YYYY-MM-DD hh:mm:ss or 1852362464
},
DATA_TYPES.CATEGORICAL: {
DATA_SUBTYPES.SINGLE: "category",
DATA_SUBTYPES.MULTIPLE: "category",
},
DATA_TYPES.FILE_PATH: {
DATA_SUBTYPES.IMAGE: "object",
DATA_SUBTYPES.VIDEO: "object",
DATA_SUBTYPES.AUDIO: "object",
},
DATA_TYPES.SEQUENTIAL: {DATA_SUBTYPES.ARRAY: "object"},
DATA_TYPES.TEXT: {DATA_SUBTYPES.SHORT: "object", DATA_SUBTYPES.RICH: "object"},
}
columns = [dict(name=x) for x in list(df.keys())]
for column in columns:
try:
name = column["name"]
col_type = stats[name]["typing"]["data_type"]
col_subtype = stats[name]["typing"]["data_subtype"]
new_type = types_map[col_type][col_subtype]
if new_type == "int64" or new_type == "float64":
df[name] = df[name].apply(
lambda x: x.replace(",", ".") if isinstance(x, str) else x
)
if new_type == "int64":
df = df.astype({name: "float64"})
df = df.astype({name: new_type})
except Exception as e:
print(e)
print(f"Error: cant convert type of DS column {name} to {new_type}")
return df
|
def cast_df_columns_types(df, stats):
types_map = {
DATA_TYPES.NUMERIC: {
DATA_SUBTYPES.INT: "int64",
DATA_SUBTYPES.FLOAT: "float64",
DATA_SUBTYPES.BINARY: "bool",
},
DATA_TYPES.DATE: {
DATA_SUBTYPES.DATE: "datetime64", # YYYY-MM-DD
DATA_SUBTYPES.TIMESTAMP: "datetime64", # YYYY-MM-DD hh:mm:ss or 1852362464
},
DATA_TYPES.CATEGORICAL: {
DATA_SUBTYPES.SINGLE: "category",
DATA_SUBTYPES.MULTIPLE: "category",
},
DATA_TYPES.FILE_PATH: {
DATA_SUBTYPES.IMAGE: "object",
DATA_SUBTYPES.VIDEO: "object",
DATA_SUBTYPES.AUDIO: "object",
},
DATA_TYPES.SEQUENTIAL: {DATA_SUBTYPES.ARRAY: "object"},
DATA_TYPES.TEXT: {DATA_SUBTYPES.SHORT: "object", DATA_SUBTYPES.RICH: "object"},
}
columns = [dict(name=x) for x in list(df.keys())]
for column in columns:
try:
name = column["name"]
if stats[name].get("empty", {}).get("is_empty", False):
new_type = types_map[DATA_TYPES.NUMERIC][DATA_SUBTYPES.INT]
else:
col_type = stats[name]["typing"]["data_type"]
col_subtype = stats[name]["typing"]["data_subtype"]
new_type = types_map[col_type][col_subtype]
if new_type == "int64" or new_type == "float64":
df[name] = df[name].apply(
lambda x: x.replace(",", ".") if isinstance(x, str) else x
)
if new_type == "int64":
df = df.astype({name: "float64"})
df = df.astype({name: new_type})
except Exception as e:
print(e)
print(f"Error: cant convert type of DS column {name} to {new_type}")
return df
|
https://github.com/mindsdb/mindsdb/issues/928
|
'typing'
ERROR while fetching data for query: SELECT `satisfaction`, `satisfaction_confidence` FROM `mindsdb`.`airline_data` WHERE ((`age` = 30))
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py", line 68, in fetch
self._fetchData()
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py", line 440, in _fetchData
came_from=self.integration
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py", line 220, in select
for f in model['data_analysis_v2']['columns']
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py", line 220, in <dictcomp>
for f in model['data_analysis_v2']['columns']
KeyError: 'typing'
'typing'
|
KeyError
|
def get_model_data(self, name, native_view=False):
model = F.get_model_data(name)
if native_view:
return model
data_analysis = model["data_analysis_v2"]
for column in data_analysis["columns"]:
if len(data_analysis[column]) == 0 or data_analysis[column].get(
"empty", {}
).get("is_empty", False):
data_analysis[column]["typing"] = {"data_subtype": DATA_SUBTYPES.INT}
return model
|
def get_model_data(self, name):
return F.get_model_data(name)
|
https://github.com/mindsdb/mindsdb/issues/928
|
'typing'
ERROR while fetching data for query: SELECT `satisfaction`, `satisfaction_confidence` FROM `mindsdb`.`airline_data` WHERE ((`age` = 30))
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py", line 68, in fetch
self._fetchData()
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py", line 440, in _fetchData
came_from=self.integration
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py", line 220, in select
for f in model['data_analysis_v2']['columns']
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py", line 220, in <dictcomp>
for f in model['data_analysis_v2']['columns']
KeyError: 'typing'
'typing'
|
KeyError
|
def cast_df_columns_types(df, stats):
types_map = {
DATA_TYPES.NUMERIC: {
DATA_SUBTYPES.INT: "int64",
DATA_SUBTYPES.FLOAT: "float64",
DATA_SUBTYPES.BINARY: "bool",
},
DATA_TYPES.DATE: {
DATA_SUBTYPES.DATE: "datetime64", # YYYY-MM-DD
DATA_SUBTYPES.TIMESTAMP: "datetime64", # YYYY-MM-DD hh:mm:ss or 1852362464
},
DATA_TYPES.CATEGORICAL: {
DATA_SUBTYPES.SINGLE: "category",
DATA_SUBTYPES.MULTIPLE: "category",
},
DATA_TYPES.FILE_PATH: {
DATA_SUBTYPES.IMAGE: "object",
DATA_SUBTYPES.VIDEO: "object",
DATA_SUBTYPES.AUDIO: "object",
},
DATA_TYPES.SEQUENTIAL: {DATA_SUBTYPES.ARRAY: "object"},
DATA_TYPES.TEXT: {DATA_SUBTYPES.SHORT: "object", DATA_SUBTYPES.RICH: "object"},
}
columns = [dict(name=x) for x in list(df.keys())]
for column in columns:
try:
name = column["name"]
if stats[name].get("empty", {}).get("is_empty", False):
new_type = types_map[DATA_TYPES.NUMERIC][DATA_SUBTYPES.INT]
else:
col_type = stats[name]["typing"]["data_type"]
col_subtype = stats[name]["typing"]["data_subtype"]
new_type = types_map[col_type][col_subtype]
if new_type == "int64" or new_type == "float64":
df[name] = df[name].apply(
lambda x: x.replace(",", ".") if isinstance(x, str) else x
)
if new_type == "int64":
df = df.astype({name: "float64"})
df = df.astype({name: new_type})
except Exception as e:
print(e)
print(f"Error: cant convert type of DS column {name} to {new_type}")
return df
|
def cast_df_columns_types(df, stats):
types_map = {
DATA_TYPES.NUMERIC: {
DATA_SUBTYPES.INT: "int64",
DATA_SUBTYPES.FLOAT: "float64",
DATA_SUBTYPES.BINARY: "bool",
},
DATA_TYPES.DATE: {
DATA_SUBTYPES.DATE: "datetime64", # YYYY-MM-DD
DATA_SUBTYPES.TIMESTAMP: "datetime64", # YYYY-MM-DD hh:mm:ss or 1852362464
},
DATA_TYPES.CATEGORICAL: {
DATA_SUBTYPES.SINGLE: "category",
DATA_SUBTYPES.MULTIPLE: "category",
},
DATA_TYPES.FILE_PATH: {
DATA_SUBTYPES.IMAGE: "object",
DATA_SUBTYPES.VIDEO: "object",
DATA_SUBTYPES.AUDIO: "object",
},
DATA_TYPES.SEQUENTIAL: {DATA_SUBTYPES.ARRAY: "object"},
DATA_TYPES.TEXT: {DATA_SUBTYPES.SHORT: "object", DATA_SUBTYPES.RICH: "object"},
}
columns = [dict(name=x) for x in list(df.keys())]
for column in columns:
try:
name = column["name"]
col_type = stats[name]["typing"]["data_type"]
col_subtype = stats[name]["typing"]["data_subtype"]
new_type = types_map[col_type][col_subtype]
if new_type == "int64" or new_type == "float64":
df[name] = df[name].apply(
lambda x: x.replace(",", ".") if isinstance(x, str) else x
)
if new_type == "int64":
df = df.astype({name: "float64"})
df = df.astype({name: new_type})
except Exception as e:
print(e)
print(f"Error: cant convert type of DS column {name} to {new_type}")
return df
|
https://github.com/mindsdb/mindsdb/issues/911
|
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask_restx/marshalling.py", line 248, in wrapper
resp = f(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/api/http/namespaces/datasource.py", line 113, in put
ca.default_store.save_datasource(name, source_type, request.json)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/interfaces/datastore/datastore.py", line 185, in save_datasource
df_with_types = cast_df_columns_types(df, self.get_analysis(df)['data_analysis_v2'])
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/interfaces/datastore/sqlite_helpers.py", line 54, in cast_df_columns_types
print(f'Error: cant convert type of DS column {name} to {new_type}')
UnboundLocalError: local variable 'new_type' referenced before assignment
ERROR:mindsdb.api.http.initialize:Exception on /api/datasources/AirplaneData [PUT]
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/interfaces/datastore/sqlite_helpers.py", line 44, in cast_df_columns_types
col_type = stats[name]['typing']['data_type']
KeyError: 'typing'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask_restx/marshalling.py", line 248, in wrapper
resp = f(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/api/http/namespaces/datasource.py", line 113, in put
ca.default_store.save_datasource(name, source_type, request.json)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/interfaces/datastore/datastore.py", line 185, in save_datasource
df_with_types = cast_df_columns_types(df, self.get_analysis(df)['data_analysis_v2'])
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/interfaces/datastore/sqlite_helpers.py", line 54, in cast_df_columns_types
print(f'Error: cant convert type of DS column {name} to {new_type}')
UnboundLocalError: local variable 'new_type' referenced before assignment
|
UnboundLocalError
|
def put(self, name):
"""add new datasource"""
data = {}
def on_field(field):
print(f"\n\n{field}\n\n")
name = field.field_name.decode()
value = field.value.decode()
data[name] = value
file_object = None
def on_file(file):
nonlocal file_object
data["file"] = file.file_name.decode()
file_object = file.file_object
temp_dir_path = tempfile.mkdtemp(prefix="datasource_file_")
if request.headers["Content-Type"].startswith("multipart/form-data"):
parser = multipart.create_form_parser(
headers=request.headers,
on_field=on_field,
on_file=on_file,
config={
"UPLOAD_DIR": temp_dir_path.encode(), # bytes required
"UPLOAD_KEEP_FILENAME": True,
"UPLOAD_KEEP_EXTENSIONS": True,
"MAX_MEMORY_FILE_SIZE": 0,
},
)
while True:
chunk = request.stream.read(8192)
if not chunk:
break
parser.write(chunk)
parser.finalize()
parser.close()
if file_object is not None and not file_object.closed:
file_object.close()
else:
data = request.json
if "query" in data:
query = request.json["query"]
source_type = request.json["integration_id"]
ca.default_store.save_datasource(name, source_type, query)
os.rmdir(temp_dir_path)
return ca.default_store.get_datasource(name)
ds_name = data["name"] if "name" in data else name
source = data["source"] if "source" in data else name
source_type = data["source_type"]
if source_type == "file":
file_path = os.path.join(temp_dir_path, data["file"])
else:
file_path = None
ca.default_store.save_datasource(ds_name, source_type, source, file_path)
os.rmdir(temp_dir_path)
return ca.default_store.get_datasource(ds_name)
|
def put(self, name):
"""add new datasource"""
data = {}
def on_field(field):
print(f"\n\n{field}\n\n")
name = field.field_name.decode()
value = field.value.decode()
data[name] = value
def on_file(file):
data["file"] = file.file_name.decode()
temp_dir_path = tempfile.mkdtemp(prefix="datasource_file_")
if request.headers["Content-Type"].startswith("multipart/form-data"):
parser = multipart.create_form_parser(
headers=request.headers,
on_field=on_field,
on_file=on_file,
config={
"UPLOAD_DIR": temp_dir_path.encode(), # bytes required
"UPLOAD_KEEP_FILENAME": True,
"UPLOAD_KEEP_EXTENSIONS": True,
"MAX_MEMORY_FILE_SIZE": 0,
},
)
while True:
chunk = request.stream.read(8192)
if not chunk:
break
parser.write(chunk)
parser.finalize()
parser.close()
else:
data = request.json
if "query" in data:
query = request.json["query"]
source_type = request.json["integration_id"]
ca.default_store.save_datasource(name, source_type, query)
os.rmdir(temp_dir_path)
return ca.default_store.get_datasource(name)
ds_name = data["name"] if "name" in data else name
source = data["source"] if "source" in data else name
source_type = data["source_type"]
if source_type == "file":
file_path = os.path.join(temp_dir_path, data["file"])
else:
file_path = None
ca.default_store.save_datasource(ds_name, source_type, source, file_path)
os.rmdir(temp_dir_path)
return ca.default_store.get_datasource(ds_name)
|
https://github.com/mindsdb/mindsdb/issues/638
|
Exception on /datasources/Somedata [PUT] Traceback (most recent call last): File "C:\Users\jp299\AppData\Roaming\mindsdb_gui\mindsdb_server\env\lib\site-packages\flask\app.py", line 1950, in full_dispatch_request rv = self.dispatch_request() File "C:\Users\jp299\AppData\Roaming\mindsdb_gui\mindsdb_server\env\lib\site-packages\flask\app.py", line 1936, in dispatch_request return self.view_functions[rule.endpoint](**req.view_args) File "C:\Users\jp299\AppData\Roaming\mindsdb_gui\mindsdb_server\env\lib\site-packages\flask_restx\api.py", line 375, in wrapper resp = resource(*args, **kwargs) File "C:\Users\jp299\AppData\Roaming\mindsdb_gui\mindsdb_server\env\lib\site-packages\flask\views.py", line 89, in view return self.dispatch_request(*args, **kwargs) File "C:\Users\jp299\AppData\Roaming\mindsdb_gui\mindsdb_server\env\lib\site-packages\flask_restx\resource.py", line 44, in dispatch_request resp = meth(*args, **kwargs) File "C:\Users\jp299\AppData\Roaming\mindsdb_gui\mindsdb_server\env\lib\site-packages\flask_restx\marshalling.py", line 248, in wrapper resp = f(*args, **kwargs) File "C:\Users\jp299\AppData\Roaming\mindsdb_gui\mindsdb_server\env\lib\site-packages\mindsdb\api\http\namespaces\datasource.py", line 114, in put ca.default_store.save_datasource(ds_name, source_type, source, file_path) File "C:\Users\jp299\AppData\Roaming\mindsdb_gui\mindsdb_server\env\lib\site-packages\mindsdb\interfaces\datastore\datastore.py", line 81, in save_datasource os.replace(file_path, source) PermissionError: [WinError 32] El proceso no tiene acceso al archivo porque est� siendo utilizado por otro proceso: 'C:\\Users\\jp299\\AppData\\Local\\Temp\\datasource_file_pzpfjtzc\\Somedata.xlsx' -> 'C:\\Users\\jp299\\AppData\\Roaming\\mindsdb_gui\\mindsdb_server\\env\\lib\\site-packages/var/datastore\\Somedata\datasource\\Somedata.xlsx'
|
PermissionError
|
def on_file(file):
nonlocal file_object
data["file"] = file.file_name.decode()
file_object = file.file_object
|
def on_file(file):
data["file"] = file.file_name.decode()
|
https://github.com/mindsdb/mindsdb/issues/638
|
Exception on /datasources/Somedata [PUT] Traceback (most recent call last): File "C:\Users\jp299\AppData\Roaming\mindsdb_gui\mindsdb_server\env\lib\site-packages\flask\app.py", line 1950, in full_dispatch_request rv = self.dispatch_request() File "C:\Users\jp299\AppData\Roaming\mindsdb_gui\mindsdb_server\env\lib\site-packages\flask\app.py", line 1936, in dispatch_request return self.view_functions[rule.endpoint](**req.view_args) File "C:\Users\jp299\AppData\Roaming\mindsdb_gui\mindsdb_server\env\lib\site-packages\flask_restx\api.py", line 375, in wrapper resp = resource(*args, **kwargs) File "C:\Users\jp299\AppData\Roaming\mindsdb_gui\mindsdb_server\env\lib\site-packages\flask\views.py", line 89, in view return self.dispatch_request(*args, **kwargs) File "C:\Users\jp299\AppData\Roaming\mindsdb_gui\mindsdb_server\env\lib\site-packages\flask_restx\resource.py", line 44, in dispatch_request resp = meth(*args, **kwargs) File "C:\Users\jp299\AppData\Roaming\mindsdb_gui\mindsdb_server\env\lib\site-packages\flask_restx\marshalling.py", line 248, in wrapper resp = f(*args, **kwargs) File "C:\Users\jp299\AppData\Roaming\mindsdb_gui\mindsdb_server\env\lib\site-packages\mindsdb\api\http\namespaces\datasource.py", line 114, in put ca.default_store.save_datasource(ds_name, source_type, source, file_path) File "C:\Users\jp299\AppData\Roaming\mindsdb_gui\mindsdb_server\env\lib\site-packages\mindsdb\interfaces\datastore\datastore.py", line 81, in save_datasource os.replace(file_path, source) PermissionError: [WinError 32] El proceso no tiene acceso al archivo porque est� siendo utilizado por otro proceso: 'C:\\Users\\jp299\\AppData\\Local\\Temp\\datasource_file_pzpfjtzc\\Somedata.xlsx' -> 'C:\\Users\\jp299\\AppData\\Roaming\\mindsdb_gui\\mindsdb_server\\env\\lib\\site-packages/var/datastore\\Somedata\datasource\\Somedata.xlsx'
|
PermissionError
|
def save_datasource(self, name, source_type, source, file_path=None):
if source_type == "file" and (file_path is None):
raise Exception('`file_path` argument required when source_type == "file"')
for i in range(1, 1000):
if name in [x["name"] for x in self.get_datasources()]:
previous_index = i - 1
name = name.replace(f"__{previous_index}__", "")
name = f"{name}__{i}__"
else:
break
ds_meta_dir = os.path.join(self.dir, name)
os.mkdir(ds_meta_dir)
ds_dir = os.path.join(ds_meta_dir, "datasource")
os.mkdir(ds_dir)
if source_type == "file":
try:
source = os.path.join(ds_dir, source)
shutil.move(file_path, source)
ds = FileDS(source)
except Exception:
shutil.rmtree(ds_meta_dir)
raise
picklable = {"class": "FileDS", "args": [source], "kwargs": {}}
elif source_type in self.config["integrations"]:
integration = self.config["integrations"][source_type]
dsClass = None
picklable = {
"args": [],
"kwargs": {
"query": source,
"user": integration["user"],
"password": integration["password"],
"host": integration["host"],
"port": integration["port"],
},
}
if integration["type"] == "clickhouse":
dsClass = ClickhouseDS
picklable["class"] = "ClickhouseDS"
elif integration["type"] == "mariadb":
dsClass = MariaDS
picklable["class"] = "MariaDS"
elif integration["type"] == "mysql":
dsClass = MySqlDS
picklable["class"] = "MySqlDS"
elif integration["type"] == "postgres":
dsClass = PostgresDS
picklable["class"] = "PostgresDS"
else:
raise ValueError(f"Unknown DS source_type: {source_type}")
try:
ds = dsClass(
query=source,
user=integration["user"],
password=integration["password"],
host=integration["host"],
port=integration["port"],
)
except Exception:
shutil.rmtree(ds_meta_dir)
raise
else:
# This probably only happens for urls
print("Create URL data source !")
try:
ds = FileDS(source)
except Exception:
shutil.rmtree(ds_meta_dir)
raise
picklable = {"class": "FileDS", "args": [source], "kwargs": {}}
df = ds.df
df_with_types = cast_df_columns_types(df, self.get_analysis(df)["data_analysis_v2"])
create_sqlite_db(os.path.join(ds_dir, "sqlite.db"), df_with_types)
with open(os.path.join(ds_dir, "ds.pickle"), "wb") as fp:
pickle.dump(picklable, fp)
with open(os.path.join(ds_dir, "metadata.json"), "w") as fp:
meta = {
"name": name,
"source_type": source_type,
"source": source,
"created_at": str(datetime.datetime.now()).split(".")[0],
"updated_at": str(datetime.datetime.now()).split(".")[0],
"row_count": len(df),
"columns": [dict(name=x) for x in list(df.keys())],
}
json.dump(meta, fp)
return self.get_datasource_obj(name, raw=True), name
|
def save_datasource(self, name, source_type, source, file_path=None):
if source_type == "file" and (file_path is None):
raise Exception('`file_path` argument required when source_type == "file"')
for i in range(1, 1000):
if name in [x["name"] for x in self.get_datasources()]:
previous_index = i - 1
name = name.replace(f"__{previous_index}__", "")
name = f"{name}__{i}__"
else:
break
ds_meta_dir = os.path.join(self.dir, name)
os.mkdir(ds_meta_dir)
ds_dir = os.path.join(ds_meta_dir, "datasource")
os.mkdir(ds_dir)
if source_type == "file":
try:
source = os.path.join(ds_dir, source)
os.replace(file_path, source)
ds = FileDS(source)
except Exception:
shutil.rmtree(ds_meta_dir)
raise
picklable = {"class": "FileDS", "args": [source], "kwargs": {}}
elif source_type in self.config["integrations"]:
integration = self.config["integrations"][source_type]
dsClass = None
picklable = {
"args": [],
"kwargs": {
"query": source,
"user": integration["user"],
"password": integration["password"],
"host": integration["host"],
"port": integration["port"],
},
}
if integration["type"] == "clickhouse":
dsClass = ClickhouseDS
picklable["class"] = "ClickhouseDS"
elif integration["type"] == "mariadb":
dsClass = MariaDS
picklable["class"] = "MariaDS"
elif integration["type"] == "mysql":
dsClass = MySqlDS
picklable["class"] = "MySqlDS"
elif integration["type"] == "postgres":
dsClass = PostgresDS
picklable["class"] = "PostgresDS"
else:
raise ValueError(f"Unknown DS source_type: {source_type}")
try:
ds = dsClass(
query=source,
user=integration["user"],
password=integration["password"],
host=integration["host"],
port=integration["port"],
)
except Exception:
shutil.rmtree(ds_meta_dir)
raise
else:
# This probably only happens for urls
print("Create URL data source !")
try:
ds = FileDS(source)
except Exception:
shutil.rmtree(ds_meta_dir)
raise
picklable = {"class": "FileDS", "args": [source], "kwargs": {}}
df = ds.df
df_with_types = cast_df_columns_types(df, self.get_analysis(df)["data_analysis_v2"])
create_sqlite_db(os.path.join(ds_dir, "sqlite.db"), df_with_types)
with open(os.path.join(ds_dir, "ds.pickle"), "wb") as fp:
pickle.dump(picklable, fp)
with open(os.path.join(ds_dir, "metadata.json"), "w") as fp:
meta = {
"name": name,
"source_type": source_type,
"source": source,
"created_at": str(datetime.datetime.now()).split(".")[0],
"updated_at": str(datetime.datetime.now()).split(".")[0],
"row_count": len(df),
"columns": [dict(name=x) for x in list(df.keys())],
}
json.dump(meta, fp)
return self.get_datasource_obj(name, raw=True), name
|
https://github.com/mindsdb/mindsdb/issues/638
|
Exception on /datasources/Somedata [PUT] Traceback (most recent call last): File "C:\Users\jp299\AppData\Roaming\mindsdb_gui\mindsdb_server\env\lib\site-packages\flask\app.py", line 1950, in full_dispatch_request rv = self.dispatch_request() File "C:\Users\jp299\AppData\Roaming\mindsdb_gui\mindsdb_server\env\lib\site-packages\flask\app.py", line 1936, in dispatch_request return self.view_functions[rule.endpoint](**req.view_args) File "C:\Users\jp299\AppData\Roaming\mindsdb_gui\mindsdb_server\env\lib\site-packages\flask_restx\api.py", line 375, in wrapper resp = resource(*args, **kwargs) File "C:\Users\jp299\AppData\Roaming\mindsdb_gui\mindsdb_server\env\lib\site-packages\flask\views.py", line 89, in view return self.dispatch_request(*args, **kwargs) File "C:\Users\jp299\AppData\Roaming\mindsdb_gui\mindsdb_server\env\lib\site-packages\flask_restx\resource.py", line 44, in dispatch_request resp = meth(*args, **kwargs) File "C:\Users\jp299\AppData\Roaming\mindsdb_gui\mindsdb_server\env\lib\site-packages\flask_restx\marshalling.py", line 248, in wrapper resp = f(*args, **kwargs) File "C:\Users\jp299\AppData\Roaming\mindsdb_gui\mindsdb_server\env\lib\site-packages\mindsdb\api\http\namespaces\datasource.py", line 114, in put ca.default_store.save_datasource(ds_name, source_type, source, file_path) File "C:\Users\jp299\AppData\Roaming\mindsdb_gui\mindsdb_server\env\lib\site-packages\mindsdb\interfaces\datastore\datastore.py", line 81, in save_datasource os.replace(file_path, source) PermissionError: [WinError 32] El proceso no tiene acceso al archivo porque est� siendo utilizado por otro proceso: 'C:\\Users\\jp299\\AppData\\Local\\Temp\\datasource_file_pzpfjtzc\\Somedata.xlsx' -> 'C:\\Users\\jp299\\AppData\\Roaming\\mindsdb_gui\\mindsdb_server\\env\\lib\\site-packages/var/datastore\\Somedata\datasource\\Somedata.xlsx'
|
PermissionError
|
def register_predictors(self, model_data_arr):
for model_meta in model_data_arr:
name = self._escape_table_name(model_meta["name"])
stats = model_meta["data_analysis"]
if "columns_to_ignore" in stats:
del stats["columns_to_ignore"]
columns_sql = ",".join(self._to_clickhouse_table(stats, model_meta["predict"]))
columns_sql += ",`select_data_query` Nullable(String)"
columns_sql += ",`external_datasource` Nullable(String)"
for col in model_meta["predict"]:
columns_sql += f",`{col}_confidence` Nullable(Float64)"
if model_meta["data_analysis"][col]["typing"]["data_type"] == "Numeric":
columns_sql += f",`{col}_min` Nullable(Float64)"
columns_sql += f",`{col}_max` Nullable(Float64)"
columns_sql += f",`{col}_explain` Nullable(String)"
msqyl_conn = (
self.config["api"]["mysql"]["host"]
+ ":"
+ str(self.config["api"]["mysql"]["port"])
)
msqyl_pass = self.config["api"]["mysql"]["password"]
msqyl_user = self._get_mysql_user()
q = f"""
CREATE TABLE mindsdb.{name}
({columns_sql}
) ENGINE=MySQL('{msqyl_conn}', 'mindsdb', {name}, '{msqyl_user}', '{msqyl_pass}')
"""
self._query(q)
|
def register_predictors(self, model_data_arr):
for model_meta in model_data_arr:
name = model_meta["name"]
stats = model_meta["data_analysis"]
if "columns_to_ignore" in stats:
del stats["columns_to_ignore"]
columns_sql = ",".join(self._to_clickhouse_table(stats, model_meta["predict"]))
columns_sql += ",`select_data_query` Nullable(String)"
columns_sql += ",`external_datasource` Nullable(String)"
for col in model_meta["predict"]:
columns_sql += f",`{col}_confidence` Nullable(Float64)"
if model_meta["data_analysis"][col]["typing"]["data_type"] == "Numeric":
columns_sql += f",`{col}_min` Nullable(Float64)"
columns_sql += f",`{col}_max` Nullable(Float64)"
columns_sql += f",`{col}_explain` Nullable(String)"
msqyl_conn = (
self.config["api"]["mysql"]["host"]
+ ":"
+ str(self.config["api"]["mysql"]["port"])
)
msqyl_pass = self.config["api"]["mysql"]["password"]
msqyl_user = self._get_mysql_user()
q = f"""
CREATE TABLE mindsdb.{name}
({columns_sql}
) ENGINE=MySQL('{msqyl_conn}', 'mindsdb', {name}, '{msqyl_user}', '{msqyl_pass}')
"""
self._query(q)
|
https://github.com/mindsdb/mindsdb/issues/593
|
Using configuration file: config.json
Traceback (most recent call last):
File "/usr/local/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/local/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/zoran/MyProjects/mindsdb/mindsdb/__main__.py", line 61, in <module>
dbw.register_predictors(model_data_arr)
File "/home/zoran/MyProjects/mindsdb/mindsdb/interfaces/database/database.py", line 41, in register_predictors
for integration in it: integration.register_predictors(model_data_arr)
File "/home/zoran/MyProjects/mindsdb/mindsdb/integrations/clickhouse/clickhouse.py", line 115, in register_predictors
self._query(q)
File "/home/zoran/MyProjects/mindsdb/mindsdb/integrations/clickhouse/clickhouse.py", line 58, in _query
raise Exception(f'Error: {response.content}\nQuery:{query}')
Exception: Error: b'Code: 62, e.displayText() = DB::Exception: Syntax error: failed at position 51 (line 2, col 50): -class\n ( `Number of times pregnant` Nullable(String) , `Plasma glucose concentration` Nullable(Int64) , `Diastolic blood pressure` Nullable(. Expected one of: storage definition, ENGINE, AS, UUID, OpeningRoundBracket, ON, Dot, token (version 20.5.2.7 (official build))\n'
Query:
CREATE TABLE mindsdb.diabetes-class
( `Number of times pregnant` Nullable(String) , `Plasma glucose concentration` Nullable(Int64) , `Diastolic blood pressure` Nullable(Int64) , `Triceps skin fold thickness` Nullable(Int64) , `2-Hour serum insulin` Nullable(Int64) , `Body mass index` Nullable(Float64) , `Diabetes pedigree function` Nullable(Float64) , `Age` Nullable(Int64) , `Class` Nullable(String) , `Class_original` Nullable(String) ,`select_data_query` Nullable(String),`Class_confidence` Nullable(Float64),`Class_explain` Nullable(String)
) ENGINE=MySQL('127.0.0.1:47335', 'mindsdb', 'diabetes-class_clickhouse', 'root', 'somepass')
|
Exception
|
def unregister_predictor(self, name):
q = f"""
drop table if exists mindsdb.{self._escape_table_name(name)};
"""
self._query(q)
|
def unregister_predictor(self, name):
q = f"""
drop table if exists mindsdb.{name};
"""
self._query(q)
|
https://github.com/mindsdb/mindsdb/issues/593
|
Using configuration file: config.json
Traceback (most recent call last):
File "/usr/local/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/local/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/zoran/MyProjects/mindsdb/mindsdb/__main__.py", line 61, in <module>
dbw.register_predictors(model_data_arr)
File "/home/zoran/MyProjects/mindsdb/mindsdb/interfaces/database/database.py", line 41, in register_predictors
for integration in it: integration.register_predictors(model_data_arr)
File "/home/zoran/MyProjects/mindsdb/mindsdb/integrations/clickhouse/clickhouse.py", line 115, in register_predictors
self._query(q)
File "/home/zoran/MyProjects/mindsdb/mindsdb/integrations/clickhouse/clickhouse.py", line 58, in _query
raise Exception(f'Error: {response.content}\nQuery:{query}')
Exception: Error: b'Code: 62, e.displayText() = DB::Exception: Syntax error: failed at position 51 (line 2, col 50): -class\n ( `Number of times pregnant` Nullable(String) , `Plasma glucose concentration` Nullable(Int64) , `Diastolic blood pressure` Nullable(. Expected one of: storage definition, ENGINE, AS, UUID, OpeningRoundBracket, ON, Dot, token (version 20.5.2.7 (official build))\n'
Query:
CREATE TABLE mindsdb.diabetes-class
( `Number of times pregnant` Nullable(String) , `Plasma glucose concentration` Nullable(Int64) , `Diastolic blood pressure` Nullable(Int64) , `Triceps skin fold thickness` Nullable(Int64) , `2-Hour serum insulin` Nullable(Int64) , `Body mass index` Nullable(Float64) , `Diabetes pedigree function` Nullable(Float64) , `Age` Nullable(Int64) , `Class` Nullable(String) , `Class_original` Nullable(String) ,`select_data_query` Nullable(String),`Class_confidence` Nullable(Float64),`Class_explain` Nullable(String)
) ENGINE=MySQL('127.0.0.1:47335', 'mindsdb', 'diabetes-class_clickhouse', 'root', 'somepass')
|
Exception
|
def register_predictors(self, model_data_arr):
for model_meta in model_data_arr:
name = model_meta["name"]
stats = model_meta["data_analysis"]
columns_sql = ",".join(self._to_mariadb_table(stats, model_meta["predict"]))
columns_sql += ",`select_data_query` varchar(500)"
columns_sql += ",`external_datasource` varchar(500)"
for col in model_meta["predict"]:
columns_sql += f",`{col}_confidence` double"
if model_meta["data_analysis"][col]["typing"]["data_type"] == "Numeric":
columns_sql += f",`{col}_min` double"
columns_sql += f",`{col}_max` double"
columns_sql += f",`{col}_explain` varchar(500)"
connect = self._get_connect_string(name)
q = f"""
CREATE TABLE mindsdb.{self._escape_table_name(name)}
({columns_sql}
) ENGINE=CONNECT TABLE_TYPE=MYSQL CONNECTION='{connect}';
"""
self._query(q)
|
def register_predictors(self, model_data_arr):
for model_meta in model_data_arr:
name = model_meta["name"]
stats = model_meta["data_analysis"]
columns_sql = ",".join(self._to_mariadb_table(stats, model_meta["predict"]))
columns_sql += ",`select_data_query` varchar(500)"
columns_sql += ",`external_datasource` varchar(500)"
for col in model_meta["predict"]:
columns_sql += f",`{col}_confidence` double"
if model_meta["data_analysis"][col]["typing"]["data_type"] == "Numeric":
columns_sql += f",`{col}_min` double"
columns_sql += f",`{col}_max` double"
columns_sql += f",`{col}_explain` varchar(500)"
connect = self._get_connect_string(name)
q = f"""
CREATE TABLE mindsdb.{name}
({columns_sql}
) ENGINE=CONNECT TABLE_TYPE=MYSQL CONNECTION='{connect}';
"""
self._query(q)
|
https://github.com/mindsdb/mindsdb/issues/593
|
Using configuration file: config.json
Traceback (most recent call last):
File "/usr/local/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/local/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/zoran/MyProjects/mindsdb/mindsdb/__main__.py", line 61, in <module>
dbw.register_predictors(model_data_arr)
File "/home/zoran/MyProjects/mindsdb/mindsdb/interfaces/database/database.py", line 41, in register_predictors
for integration in it: integration.register_predictors(model_data_arr)
File "/home/zoran/MyProjects/mindsdb/mindsdb/integrations/clickhouse/clickhouse.py", line 115, in register_predictors
self._query(q)
File "/home/zoran/MyProjects/mindsdb/mindsdb/integrations/clickhouse/clickhouse.py", line 58, in _query
raise Exception(f'Error: {response.content}\nQuery:{query}')
Exception: Error: b'Code: 62, e.displayText() = DB::Exception: Syntax error: failed at position 51 (line 2, col 50): -class\n ( `Number of times pregnant` Nullable(String) , `Plasma glucose concentration` Nullable(Int64) , `Diastolic blood pressure` Nullable(. Expected one of: storage definition, ENGINE, AS, UUID, OpeningRoundBracket, ON, Dot, token (version 20.5.2.7 (official build))\n'
Query:
CREATE TABLE mindsdb.diabetes-class
( `Number of times pregnant` Nullable(String) , `Plasma glucose concentration` Nullable(Int64) , `Diastolic blood pressure` Nullable(Int64) , `Triceps skin fold thickness` Nullable(Int64) , `2-Hour serum insulin` Nullable(Int64) , `Body mass index` Nullable(Float64) , `Diabetes pedigree function` Nullable(Float64) , `Age` Nullable(Int64) , `Class` Nullable(String) , `Class_original` Nullable(String) ,`select_data_query` Nullable(String),`Class_confidence` Nullable(Float64),`Class_explain` Nullable(String)
) ENGINE=MySQL('127.0.0.1:47335', 'mindsdb', 'diabetes-class_clickhouse', 'root', 'somepass')
|
Exception
|
def save_datasource(self, name, source_type, source, file_path=None):
if source_type == "file" and (file_path is None):
raise Exception('`file_path` argument required when source_type == "file"')
for i in range(1, 1000):
if name in [x["name"] for x in self.get_datasources()]:
previous_index = i - 1
name = name.replace(f"__{previous_index}__", "")
name = f"{name}__{i}__"
else:
break
ds_meta_dir = os.path.join(self.dir, name)
os.mkdir(ds_meta_dir)
ds_dir = os.path.join(ds_meta_dir, "datasource")
os.mkdir(ds_dir)
if source_type == "file":
source = os.path.join(ds_dir, source)
os.replace(file_path, source)
ds = FileDS(source)
picklable = {"class": "FileDS", "args": [source], "kwargs": {}}
elif source_type == "clickhouse":
user = self.config["integrations"]["default_clickhouse"]["user"]
password = self.config["integrations"]["default_clickhouse"]["password"]
# TODO add host port params
ds = ClickhouseDS(query=source, user=user, password=password)
picklable = {
"class": "ClickhouseDS",
"args": [],
"kwargs": {"query": source, "user": user, "password": password},
}
elif source_type == "mariadb":
user = self.config["integrations"]["default_mariadb"]["user"]
password = self.config["integrations"]["default_mariadb"]["password"]
host = self.config["integrations"]["default_mariadb"]["host"]
port = self.config["integrations"]["default_mariadb"]["port"]
ds = MariaDS(query=source, user=user, password=password, host=host, port=port)
picklable = {
"class": "MariaDS",
"args": [],
"kwargs": {
"query": source,
"user": user,
"password": password,
"host": host,
"port": port,
},
}
else:
# This probably only happens for urls
print("Create URL data source !")
ds = FileDS(source)
picklable = {"class": "FileDS", "args": [source], "kwargs": {}}
df = ds.df
df_with_types = cast_df_columns_types(df, self.get_analysis(df)["data_analysis_v2"])
create_sqlite_db(os.path.join(ds_dir, "sqlite.db"), df_with_types)
with open(os.path.join(ds_dir, "ds.pickle"), "wb") as fp:
pickle.dump(picklable, fp)
with open(os.path.join(ds_dir, "metadata.json"), "w") as fp:
json.dump(
{
"name": name,
"source_type": source_type,
"source": source,
"created_at": str(datetime.datetime.now()).split(".")[0],
"updated_at": str(datetime.datetime.now()).split(".")[0],
"row_count": len(df),
"columns": [dict(name=x) for x in list(df.keys())],
},
fp,
)
return self.get_datasource_obj(name, raw=True)
|
def save_datasource(self, name, source_type, source, file_path=None):
if source_type == "file" and (file_path is None):
raise Exception('`file_path` argument required when source_type == "file"')
for i in range(1, 1000):
if name in [x["name"] for x in self.get_datasources()]:
previous_index = i - 1
name = name.replace(f"__{previous_index}__", "")
name = f"{name}__{i}__"
else:
break
ds_meta_dir = os.path.join(self.dir, name)
os.mkdir(ds_meta_dir)
ds_dir = os.path.join(ds_meta_dir, "datasource")
os.mkdir(ds_dir)
if source_type == "file":
source = os.path.join(ds_dir, source)
os.replace(file_path, source)
ds = FileDS(source)
picklable = {"class": "FileDS", "args": [source], "kwargs": {}}
elif source_type == "clickhouse":
user = self.config["integrations"]["default_clickhouse"]["user"]
password = self.config["integrations"]["default_clickhouse"]["password"]
# TODO add host port params
ds = ClickhouseDS(source, user=user, password=password)
picklable = {
"class": "ClickhouseDS",
"args": [source],
"kwargs": {"user": user, "password": password},
}
elif source_type == "mariadb":
user = self.config["integrations"]["default_mariadb"]["user"]
password = self.config["integrations"]["default_mariadb"]["password"]
host = self.config["integrations"]["default_mariadb"]["host"]
port = self.config["integrations"]["default_mariadb"]["port"]
ds = MariaDS(source, user=user, password=password, host=host, port=port)
picklable = {
"class": "MariaDS",
"args": [source],
"kwargs": {"user": user, "password": password, "host": host, "port": port},
}
else:
# This probably only happens for urls
print("Create URL data source !")
ds = FileDS(source)
picklable = {"class": "FileDS", "args": [source], "kwargs": {}}
df = ds.df
df_with_types = cast_df_columns_types(df, self.get_analysis(df)["data_analysis_v2"])
create_sqlite_db(os.path.join(ds_dir, "sqlite.db"), df_with_types)
with open(os.path.join(ds_dir, "ds.pickle"), "wb") as fp:
pickle.dump(picklable, fp)
with open(os.path.join(ds_dir, "metadata.json"), "w") as fp:
json.dump(
{
"name": name,
"source_type": source_type,
"source": source,
"created_at": str(datetime.datetime.now()).split(".")[0],
"updated_at": str(datetime.datetime.now()).split(".")[0],
"row_count": len(df),
"columns": [dict(name=x) for x in list(df.keys())],
},
fp,
)
return self.get_datasource_obj(name, raw=True)
|
https://github.com/mindsdb/mindsdb/issues/593
|
Using configuration file: config.json
Traceback (most recent call last):
File "/usr/local/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/local/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/zoran/MyProjects/mindsdb/mindsdb/__main__.py", line 61, in <module>
dbw.register_predictors(model_data_arr)
File "/home/zoran/MyProjects/mindsdb/mindsdb/interfaces/database/database.py", line 41, in register_predictors
for integration in it: integration.register_predictors(model_data_arr)
File "/home/zoran/MyProjects/mindsdb/mindsdb/integrations/clickhouse/clickhouse.py", line 115, in register_predictors
self._query(q)
File "/home/zoran/MyProjects/mindsdb/mindsdb/integrations/clickhouse/clickhouse.py", line 58, in _query
raise Exception(f'Error: {response.content}\nQuery:{query}')
Exception: Error: b'Code: 62, e.displayText() = DB::Exception: Syntax error: failed at position 51 (line 2, col 50): -class\n ( `Number of times pregnant` Nullable(String) , `Plasma glucose concentration` Nullable(Int64) , `Diastolic blood pressure` Nullable(. Expected one of: storage definition, ENGINE, AS, UUID, OpeningRoundBracket, ON, Dot, token (version 20.5.2.7 (official build))\n'
Query:
CREATE TABLE mindsdb.diabetes-class
( `Number of times pregnant` Nullable(String) , `Plasma glucose concentration` Nullable(Int64) , `Diastolic blood pressure` Nullable(Int64) , `Triceps skin fold thickness` Nullable(Int64) , `2-Hour serum insulin` Nullable(Int64) , `Body mass index` Nullable(Float64) , `Diabetes pedigree function` Nullable(Float64) , `Age` Nullable(Int64) , `Class` Nullable(String) , `Class_original` Nullable(String) ,`select_data_query` Nullable(String),`Class_confidence` Nullable(Float64),`Class_explain` Nullable(String)
) ENGINE=MySQL('127.0.0.1:47335', 'mindsdb', 'diabetes-class_clickhouse', 'root', 'somepass')
|
Exception
|
def loadFromPacketString(self, packet_string):
len_header = struct.unpack(">i", struct.pack("1s", "") + packet_string[:3])[0]
count_header = struct.unpack("B", packet_string[3])[0]
body = packet_string[4:]
self.loadFromParams(length=len_header, seq=count_header, body=body)
|
def loadFromPacketString(self, packet_string):
len_header = struct.unpack(">i", struct.pack("1s", "") + packet_string[:3])[0]
count_header = struct.unpack("b", packet_string[3])[0]
body = packet_string[4:]
self.loadFromParams(length=len_header, seq=count_header, body=body)
|
https://github.com/mindsdb/mindsdb/issues/530
|
2020-06-25 21:34:22,721 - ERROR - ERROR while executing query: SELECT `rental_price`, `$select_data_query` FROM `demo_rental_price_predictor_mariadb` WHERE $select_data_query= 'SELECT * FROM test.home_rentals limit 1000'
Traceback (most recent call last):
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 805, in handle
self.queryAnswer(sql)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 461, in queryAnswer
return self.selectAnswer(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 726, in selectAnswer
self.answerTableQuery(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 279, in answerTableQuery
self.sendPackageGroup(packages)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in sendPackageGroup
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in <listcomp>
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 92, in accum
string = self.getPacketString()
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 58, in getPacketString
count_header = struct.pack('b', self.seq)
struct.error: byte format requires -128 <= number <= 127
byte format requires -128 <= number <= 127
ERROR:mindsdb_sql:ERROR while executing query: SELECT `rental_price`, `$select_data_query` FROM `demo_rental_price_predictor_mariadb` WHERE $select_data_query= 'SELECT * FROM test.home_rentals limit 1000'
Traceback (most recent call last):
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 805, in handle
self.queryAnswer(sql)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 461, in queryAnswer
return self.selectAnswer(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 726, in selectAnswer
self.answerTableQuery(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 279, in answerTableQuery
self.sendPackageGroup(packages)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in sendPackageGroup
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in <listcomp>
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 92, in accum
string = self.getPacketString()
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 58, in getPacketString
count_header = struct.pack('b', self.seq)
struct.error: byte format requires -128 <= number <= 127
byte format requires -128 <= number <= 127
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 37752)
Traceback (most recent call last):
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 805, in handle
self.queryAnswer(sql)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 461, in queryAnswer
return self.selectAnswer(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 726, in selectAnswer
self.answerTableQuery(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 279, in answerTableQuery
self.sendPackageGroup(packages)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in sendPackageGroup
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in <listcomp>
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 92, in accum
string = self.getPacketString()
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 58, in getPacketString
count_header = struct.pack('b', self.seq)
struct.error: byte format requires -128 <= number <= 127
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.7/socketserver.py", line 650, in process_request_thread
self.finish_request(request, client_address)
File "/usr/lib/python3.7/socketserver.py", line 360, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/usr/lib/python3.7/socketserver.py", line 720, in __init__
self.handle()
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 815, in handle
msg=str(e)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 86, in send
string = self.getPacketString()
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 58, in getPacketString
count_header = struct.pack('b', self.seq)
struct.error: byte format requires -128 <= number <= 127
----------------------------------------
|
struct.error
|
def getPacketString(self):
body = self.body
len_header = struct.pack("<i", self.length)[:3] # keep it 3 bytes
count_header = struct.pack("B", self.seq)
packet = len_header + count_header + body
return packet
|
def getPacketString(self):
body = self.body
len_header = struct.pack("<i", self.length)[:3] # keep it 3 bytes
count_header = struct.pack("b", self.seq)
packet = len_header + count_header + body
return packet
|
https://github.com/mindsdb/mindsdb/issues/530
|
2020-06-25 21:34:22,721 - ERROR - ERROR while executing query: SELECT `rental_price`, `$select_data_query` FROM `demo_rental_price_predictor_mariadb` WHERE $select_data_query= 'SELECT * FROM test.home_rentals limit 1000'
Traceback (most recent call last):
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 805, in handle
self.queryAnswer(sql)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 461, in queryAnswer
return self.selectAnswer(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 726, in selectAnswer
self.answerTableQuery(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 279, in answerTableQuery
self.sendPackageGroup(packages)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in sendPackageGroup
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in <listcomp>
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 92, in accum
string = self.getPacketString()
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 58, in getPacketString
count_header = struct.pack('b', self.seq)
struct.error: byte format requires -128 <= number <= 127
byte format requires -128 <= number <= 127
ERROR:mindsdb_sql:ERROR while executing query: SELECT `rental_price`, `$select_data_query` FROM `demo_rental_price_predictor_mariadb` WHERE $select_data_query= 'SELECT * FROM test.home_rentals limit 1000'
Traceback (most recent call last):
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 805, in handle
self.queryAnswer(sql)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 461, in queryAnswer
return self.selectAnswer(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 726, in selectAnswer
self.answerTableQuery(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 279, in answerTableQuery
self.sendPackageGroup(packages)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in sendPackageGroup
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in <listcomp>
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 92, in accum
string = self.getPacketString()
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 58, in getPacketString
count_header = struct.pack('b', self.seq)
struct.error: byte format requires -128 <= number <= 127
byte format requires -128 <= number <= 127
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 37752)
Traceback (most recent call last):
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 805, in handle
self.queryAnswer(sql)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 461, in queryAnswer
return self.selectAnswer(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 726, in selectAnswer
self.answerTableQuery(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 279, in answerTableQuery
self.sendPackageGroup(packages)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in sendPackageGroup
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in <listcomp>
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 92, in accum
string = self.getPacketString()
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 58, in getPacketString
count_header = struct.pack('b', self.seq)
struct.error: byte format requires -128 <= number <= 127
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.7/socketserver.py", line 650, in process_request_thread
self.finish_request(request, client_address)
File "/usr/lib/python3.7/socketserver.py", line 360, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/usr/lib/python3.7/socketserver.py", line 720, in __init__
self.handle()
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 815, in handle
msg=str(e)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 86, in send
string = self.getPacketString()
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 58, in getPacketString
count_header = struct.pack('b', self.seq)
struct.error: byte format requires -128 <= number <= 127
----------------------------------------
|
struct.error
|
def get(self):
self.session.logging.info(f"Get packet: {self.__class__.__name__}")
len_header = MAX_PACKET_SIZE
body = b""
count_header = 1
while len_header == MAX_PACKET_SIZE:
packet_string = self.mysql_socket.recv(4)
if len(packet_string) < 4:
self.session.logging.warning(
f"Packet with less than 4 bytes in length: {packet_string}"
)
return False
break
len_header = struct.unpack("i", packet_string[:3] + b"\x00")[0]
count_header = int(packet_string[3])
if len_header == 0:
break
body += self.mysql_socket.recv(len_header)
self.session.logging.info(f"Got packet: {str(body)}")
self.proxy.count = (int(count_header) + 1) % 256
self.setup(len(body), count_header, body)
return True
|
def get(self):
self.session.logging.info(f"Get packet: {self.__class__.__name__}")
len_header = MAX_PACKET_SIZE
body = b""
count_header = 1
while len_header == MAX_PACKET_SIZE:
packet_string = self.mysql_socket.recv(4)
if len(packet_string) < 4:
self.session.logging.warning(
f"Packet with less than 4 bytes in length: {packet_string}"
)
return False
break
len_header = struct.unpack("i", packet_string[:3] + b"\x00")[0]
count_header = int(packet_string[3])
if len_header == 0:
break
body += self.mysql_socket.recv(len_header)
self.session.logging.info(f"Got packet: {str(body)}")
self.proxy.count = int(count_header) + 1
self.setup(len(body), count_header, body)
return True
|
https://github.com/mindsdb/mindsdb/issues/530
|
2020-06-25 21:34:22,721 - ERROR - ERROR while executing query: SELECT `rental_price`, `$select_data_query` FROM `demo_rental_price_predictor_mariadb` WHERE $select_data_query= 'SELECT * FROM test.home_rentals limit 1000'
Traceback (most recent call last):
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 805, in handle
self.queryAnswer(sql)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 461, in queryAnswer
return self.selectAnswer(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 726, in selectAnswer
self.answerTableQuery(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 279, in answerTableQuery
self.sendPackageGroup(packages)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in sendPackageGroup
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in <listcomp>
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 92, in accum
string = self.getPacketString()
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 58, in getPacketString
count_header = struct.pack('b', self.seq)
struct.error: byte format requires -128 <= number <= 127
byte format requires -128 <= number <= 127
ERROR:mindsdb_sql:ERROR while executing query: SELECT `rental_price`, `$select_data_query` FROM `demo_rental_price_predictor_mariadb` WHERE $select_data_query= 'SELECT * FROM test.home_rentals limit 1000'
Traceback (most recent call last):
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 805, in handle
self.queryAnswer(sql)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 461, in queryAnswer
return self.selectAnswer(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 726, in selectAnswer
self.answerTableQuery(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 279, in answerTableQuery
self.sendPackageGroup(packages)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in sendPackageGroup
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in <listcomp>
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 92, in accum
string = self.getPacketString()
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 58, in getPacketString
count_header = struct.pack('b', self.seq)
struct.error: byte format requires -128 <= number <= 127
byte format requires -128 <= number <= 127
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 37752)
Traceback (most recent call last):
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 805, in handle
self.queryAnswer(sql)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 461, in queryAnswer
return self.selectAnswer(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 726, in selectAnswer
self.answerTableQuery(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 279, in answerTableQuery
self.sendPackageGroup(packages)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in sendPackageGroup
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in <listcomp>
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 92, in accum
string = self.getPacketString()
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 58, in getPacketString
count_header = struct.pack('b', self.seq)
struct.error: byte format requires -128 <= number <= 127
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.7/socketserver.py", line 650, in process_request_thread
self.finish_request(request, client_address)
File "/usr/lib/python3.7/socketserver.py", line 360, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/usr/lib/python3.7/socketserver.py", line 720, in __init__
self.handle()
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 815, in handle
msg=str(e)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 86, in send
string = self.getPacketString()
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 58, in getPacketString
count_header = struct.pack('b', self.seq)
struct.error: byte format requires -128 <= number <= 127
----------------------------------------
|
struct.error
|
def packet(self, packetClass=Packet, **kwargs):
"""
Factory method for packets
:param packetClass:
:param kwargs:
:return:
"""
p = packetClass(
socket=self.socket, seq=self.count, session=self.session, proxy=self, **kwargs
)
self.count = (self.count + 1) % 256
return p
|
def packet(self, packetClass=Packet, **kwargs):
"""
Factory method for packets
:param packetClass:
:param kwargs:
:return:
"""
p = packetClass(
socket=self.socket, seq=self.count, session=self.session, proxy=self, **kwargs
)
self.count += 1
return p
|
https://github.com/mindsdb/mindsdb/issues/530
|
2020-06-25 21:34:22,721 - ERROR - ERROR while executing query: SELECT `rental_price`, `$select_data_query` FROM `demo_rental_price_predictor_mariadb` WHERE $select_data_query= 'SELECT * FROM test.home_rentals limit 1000'
Traceback (most recent call last):
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 805, in handle
self.queryAnswer(sql)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 461, in queryAnswer
return self.selectAnswer(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 726, in selectAnswer
self.answerTableQuery(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 279, in answerTableQuery
self.sendPackageGroup(packages)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in sendPackageGroup
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in <listcomp>
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 92, in accum
string = self.getPacketString()
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 58, in getPacketString
count_header = struct.pack('b', self.seq)
struct.error: byte format requires -128 <= number <= 127
byte format requires -128 <= number <= 127
ERROR:mindsdb_sql:ERROR while executing query: SELECT `rental_price`, `$select_data_query` FROM `demo_rental_price_predictor_mariadb` WHERE $select_data_query= 'SELECT * FROM test.home_rentals limit 1000'
Traceback (most recent call last):
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 805, in handle
self.queryAnswer(sql)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 461, in queryAnswer
return self.selectAnswer(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 726, in selectAnswer
self.answerTableQuery(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 279, in answerTableQuery
self.sendPackageGroup(packages)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in sendPackageGroup
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in <listcomp>
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 92, in accum
string = self.getPacketString()
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 58, in getPacketString
count_header = struct.pack('b', self.seq)
struct.error: byte format requires -128 <= number <= 127
byte format requires -128 <= number <= 127
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 37752)
Traceback (most recent call last):
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 805, in handle
self.queryAnswer(sql)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 461, in queryAnswer
return self.selectAnswer(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 726, in selectAnswer
self.answerTableQuery(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 279, in answerTableQuery
self.sendPackageGroup(packages)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in sendPackageGroup
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in <listcomp>
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 92, in accum
string = self.getPacketString()
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 58, in getPacketString
count_header = struct.pack('b', self.seq)
struct.error: byte format requires -128 <= number <= 127
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.7/socketserver.py", line 650, in process_request_thread
self.finish_request(request, client_address)
File "/usr/lib/python3.7/socketserver.py", line 360, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/usr/lib/python3.7/socketserver.py", line 720, in __init__
self.handle()
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 815, in handle
msg=str(e)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 86, in send
string = self.getPacketString()
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 58, in getPacketString
count_header = struct.pack('b', self.seq)
struct.error: byte format requires -128 <= number <= 127
----------------------------------------
|
struct.error
|
def handshake(self):
global HARDCODED_PASSWORD, HARDCODED_USER
def switch_auth(method="mysql_native_password"):
self.packet(SwitchOutPacket, seed=self.salt, method=method).send()
switch_out_answer = self.packet(SwitchOutResponse)
switch_out_answer.get()
return switch_out_answer.enc_password.value
def get_fast_auth_password():
log.info("Asking for fast auth password")
self.packet(FastAuthFail).send()
password_answer = self.packet(PasswordAnswer)
password_answer.get()
try:
password = password_answer.password.value.decode()
except Exception:
log.info(f"error: no password in Fast Auth answer")
self.packet(
ErrPacket,
err_code=ERR.ER_PASSWORD_NO_MATCH,
msg=f"Is not password in connection query.",
).send()
return None
return password
if self.session is None:
self.initSession()
log.info("send HandshakePacket")
self.packet(HandshakePacket).send()
handshake_resp = self.packet(HandshakeResponsePacket)
handshake_resp.get()
if handshake_resp.length == 0:
log.warning("HandshakeResponsePacket empty")
self.packet(OkPacket).send()
return False
self.client_capabilities = ClentCapabilities(handshake_resp.capabilities.value)
client_auth_plugin = handshake_resp.client_auth_plugin.value.decode()
orig_username = HARDCODED_USER
orig_password = HARDCODED_PASSWORD
orig_password_hash = handshake_resp.scramble_func(HARDCODED_PASSWORD, self.salt)
username = None
password = None
self.session.is_ssl = False
if handshake_resp.type == "SSLRequest":
log.info("switch to SSL")
self.session.is_ssl = True
ssl_socket = ssl.wrap_socket(
self.socket,
server_side=True,
certfile=CERT_PATH,
do_handshake_on_connect=True,
)
self.socket = ssl_socket
handshake_resp = self.packet(HandshakeResponsePacket)
handshake_resp.get()
client_auth_plugin = handshake_resp.client_auth_plugin.value.decode()
username = handshake_resp.username.value.decode()
if orig_username == username and HARDCODED_PASSWORD == "":
log.info(
f"Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: "
"empty password"
)
password = ""
elif (
(DEFAULT_AUTH_METHOD not in client_auth_plugin)
or self.session.is_ssl is False
and "caching_sha2_password" in client_auth_plugin
):
new_method = (
"caching_sha2_password"
if "caching_sha2_password" in client_auth_plugin
else "mysql_native_password"
)
if new_method == "caching_sha2_password" and self.session.is_ssl is False:
log.info(
f"Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: "
"error: cant switch to caching_sha2_password without SSL"
)
self.packet(
ErrPacket,
err_code=ERR.ER_PASSWORD_NO_MATCH,
msg=f"caching_sha2_password without SSL not supported",
).send()
return False
log.info(
f"Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: "
f"switch auth method to {new_method}"
)
password = switch_auth(new_method)
if new_method == "caching_sha2_password":
password = get_fast_auth_password()
else:
orig_password = orig_password_hash
elif "caching_sha2_password" in client_auth_plugin:
log.info(
f"Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: "
"check auth using caching_sha2_password"
)
password = get_fast_auth_password()
orig_password = HARDCODED_PASSWORD
elif "mysql_native_password" in client_auth_plugin:
log.info(
f"Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: "
"check auth using mysql_native_password"
)
password = handshake_resp.enc_password.value
orig_password = orig_password_hash
else:
log.info(
f"Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: "
"unknown method, possible ERROR. Try to switch to mysql_native_password"
)
password = switch_auth("mysql_native_password")
orig_password = orig_password_hash
try:
self.session.database = handshake_resp.database.value.decode()
except Exception:
self.session.database = None
log.info(
f"Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: "
f"connecting to database {self.session.database}"
)
if self.isAuthOk(username, orig_username, password, orig_password):
self.packet(OkPacket).send()
return True
else:
self.packet(
ErrPacket,
err_code=ERR.ER_PASSWORD_NO_MATCH,
msg=f"Access denied for user {username}",
).send()
log.warning("AUTH FAIL")
return False
|
def handshake(self):
global HARDCODED_PASSWORD, HARDCODED_USER
def switch_auth(method="mysql_native_password"):
self.packet(SwitchOutPacket, seed=self.salt, method=method).send()
switch_out_answer = self.packet(SwitchOutResponse)
switch_out_answer.get()
return switch_out_answer.enc_password.value
if self.session is None:
self.initSession()
log.info("send HandshakePacket")
self.packet(HandshakePacket).send()
handshake_resp = self.packet(HandshakeResponsePacket)
handshake_resp.get()
if handshake_resp.length == 0:
log.warning("HandshakeResponsePacket empty")
self.packet(OkPacket).send()
return False
self.client_capabilities = ClentCapabilities(handshake_resp.capabilities.value)
client_auth_plugin = handshake_resp.client_auth_plugin.value.decode()
orig_username = HARDCODED_USER
orig_password = HARDCODED_PASSWORD
orig_password_hash = handshake_resp.scramble_func(HARDCODED_PASSWORD, self.salt)
username = None
password = None
self.session.is_ssl = False
if handshake_resp.type == "SSLRequest":
log.info("switch to SSL")
self.session.is_ssl = True
ssl_socket = ssl.wrap_socket(
self.socket,
server_side=True,
certfile=CERT_PATH,
do_handshake_on_connect=True,
)
self.socket = ssl_socket
handshake_resp = self.packet(HandshakeResponsePacket)
handshake_resp.get()
client_auth_plugin = handshake_resp.client_auth_plugin.value.decode()
username = handshake_resp.username.value.decode()
if orig_username == username and HARDCODED_PASSWORD == "":
log.info(
f"Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: "
"empty password"
)
password = ""
elif (
(DEFAULT_AUTH_METHOD not in client_auth_plugin)
or self.session.is_ssl is False
and "caching_sha2_password" in client_auth_plugin
):
new_method = (
"caching_sha2_password"
if "caching_sha2_password" in client_auth_plugin
else "mysql_native_password"
)
if new_method == "caching_sha2_password" and self.session.is_ssl is False:
log.info(
f"Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: "
"error: cant switch to caching_sha2_password without SSL"
)
self.packet(
ErrPacket,
err_code=ERR.ER_PASSWORD_NO_MATCH,
msg=f"caching_sha2_password without SSL not supported",
).send()
return False
log.info(
f"Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: "
f"switch auth method to {new_method}"
)
password = switch_auth(new_method)
if new_method == "caching_sha2_password":
self.packet(FastAuthFail).send()
password_answer = self.packet(PasswordAnswer)
password_answer.get()
password = password_answer.password.value.decode()
else:
orig_password = orig_password_hash
elif "caching_sha2_password" in client_auth_plugin:
self.packet(FastAuthFail).send()
log.info(
f"Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: "
"check auth using caching_sha2_password"
)
password_answer = self.packet(PasswordAnswer)
password_answer.get()
password = password_answer.password.value.decode()
orig_password = HARDCODED_PASSWORD
elif "mysql_native_password" in client_auth_plugin:
log.info(
f"Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: "
"check auth using mysql_native_password"
)
password = handshake_resp.enc_password.value
orig_password = orig_password_hash
else:
log.info(
f"Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: "
"unknown method, possible ERROR. Try to switch to mysql_native_password"
)
password = switch_auth("mysql_native_password")
orig_password = orig_password_hash
try:
self.session.database = handshake_resp.database.value.decode()
except Exception:
self.session.database = None
log.info(
f"Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: "
f"connecting to database {self.session.database}"
)
if self.isAuthOk(username, orig_username, password, orig_password):
self.packet(OkPacket).send()
return True
else:
self.packet(
ErrPacket,
err_code=ERR.ER_PASSWORD_NO_MATCH,
msg=f"Access denied for user {username}",
).send()
log.warning("AUTH FAIL")
return False
|
https://github.com/mindsdb/mindsdb/issues/538
|
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 43192)
Traceback (most recent call last):
File "/usr/lib/python3.7/socketserver.py", line 650, in process_request_thread
self.finish_request(request, client_address)
File "/usr/lib/python3.7/socketserver.py", line 360, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/usr/lib/python3.7/socketserver.py", line 720, in __init__
self.handle()
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 767, in handle
if self.handshake() is False:
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 187, in handshake
password = password_answer.password.value.decode()
AttributeError: 'PasswordAnswer' object has no attribute 'password'
----------------------------------------
|
AttributeError
|
def initialize_interfaces(config, app):
app.default_store = DataStore(config)
app.mindsdb_native = MindsdbNative(config)
|
def initialize_interfaces(config, app):
with app.app_context():
g.default_store = DataStore(config)
g.mindsdb_native = MindsdbNative(config)
|
https://github.com/mindsdb/mindsdb/issues/516
|
2020-06-25 14:30:32,318 - INFO - Starting MindsDB Mysql proxy server on tcp://127.0.0.1:47335
2020-06-25 14:30:32,334 - INFO - Waiting for incoming connections...
127.0.0.1 - - [25/Jun/2020 14:31:14] "GET /util/ping HTTP/1.1" 200 -
Exception on /datasources/ [GET]
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/marshalling.py", line 248, in wrapper
resp = f(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/mindsdb/api/http/namespaces/datasource.py", line 40, in get
return g.default_store.get_datasources()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: '_AppCtxGlobals' object has no attribute 'default_store'
127.0.0.1 - - [25/Jun/2020 14:31:18] "GET /datasources/ HTTP/1.1" 500 -
Exception on /datasources/ [GET]
|
AttributeError
|
def get(self):
"""List all datasources"""
return ca.default_store.get_datasources()
|
def get(self):
"""List all datasources"""
return g.default_store.get_datasources()
|
https://github.com/mindsdb/mindsdb/issues/516
|
2020-06-25 14:30:32,318 - INFO - Starting MindsDB Mysql proxy server on tcp://127.0.0.1:47335
2020-06-25 14:30:32,334 - INFO - Waiting for incoming connections...
127.0.0.1 - - [25/Jun/2020 14:31:14] "GET /util/ping HTTP/1.1" 200 -
Exception on /datasources/ [GET]
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/marshalling.py", line 248, in wrapper
resp = f(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/mindsdb/api/http/namespaces/datasource.py", line 40, in get
return g.default_store.get_datasources()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: '_AppCtxGlobals' object has no attribute 'default_store'
127.0.0.1 - - [25/Jun/2020 14:31:18] "GET /datasources/ HTTP/1.1" 500 -
Exception on /datasources/ [GET]
|
AttributeError
|
def get(self, name):
"""return datasource metadata"""
ds = ca.default_store.get_datasource(name)
if ds is not None:
return ds
return "", 404
|
def get(self, name):
"""return datasource metadata"""
ds = g.default_store.get_datasource(name)
if ds is not None:
return ds
return "", 404
|
https://github.com/mindsdb/mindsdb/issues/516
|
2020-06-25 14:30:32,318 - INFO - Starting MindsDB Mysql proxy server on tcp://127.0.0.1:47335
2020-06-25 14:30:32,334 - INFO - Waiting for incoming connections...
127.0.0.1 - - [25/Jun/2020 14:31:14] "GET /util/ping HTTP/1.1" 200 -
Exception on /datasources/ [GET]
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/marshalling.py", line 248, in wrapper
resp = f(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/mindsdb/api/http/namespaces/datasource.py", line 40, in get
return g.default_store.get_datasources()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: '_AppCtxGlobals' object has no attribute 'default_store'
127.0.0.1 - - [25/Jun/2020 14:31:18] "GET /datasources/ HTTP/1.1" 500 -
Exception on /datasources/ [GET]
|
AttributeError
|
def delete(self, name):
"""delete datasource"""
try:
ca.default_store.delete_datasource(name)
except Exception as e:
print(e)
abort(400, str(e))
return "", 200
|
def delete(self, name):
"""delete datasource"""
try:
g.default_store.delete_datasource(name)
except Exception as e:
print(e)
abort(400, str(e))
return "", 200
|
https://github.com/mindsdb/mindsdb/issues/516
|
2020-06-25 14:30:32,318 - INFO - Starting MindsDB Mysql proxy server on tcp://127.0.0.1:47335
2020-06-25 14:30:32,334 - INFO - Waiting for incoming connections...
127.0.0.1 - - [25/Jun/2020 14:31:14] "GET /util/ping HTTP/1.1" 200 -
Exception on /datasources/ [GET]
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/marshalling.py", line 248, in wrapper
resp = f(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/mindsdb/api/http/namespaces/datasource.py", line 40, in get
return g.default_store.get_datasources()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: '_AppCtxGlobals' object has no attribute 'default_store'
127.0.0.1 - - [25/Jun/2020 14:31:18] "GET /datasources/ HTTP/1.1" 500 -
Exception on /datasources/ [GET]
|
AttributeError
|
def put(self, name):
"""add new datasource"""
data = {}
def on_field(field):
print(f"\n\n{field}\n\n")
name = field.field_name.decode()
value = field.value.decode()
data[name] = value
def on_file(file):
data["file"] = file.file_name.decode()
temp_dir_path = tempfile.mkdtemp(prefix="datasource_file_")
if request.headers["Content-Type"].startswith("multipart/form-data"):
parser = multipart.create_form_parser(
headers=request.headers,
on_field=on_field,
on_file=on_file,
config={
"UPLOAD_DIR": temp_dir_path.encode(), # bytes required
"UPLOAD_KEEP_FILENAME": True,
"UPLOAD_KEEP_EXTENSIONS": True,
"MAX_MEMORY_FILE_SIZE": 0,
},
)
while True:
chunk = request.stream.read(8192)
if not chunk:
break
parser.write(chunk)
parser.finalize()
parser.close()
else:
data = request.json
ds_name = data["name"] if "name" in data else name
source = data["source"] if "source" in data else name
source_type = data["source_type"]
if source_type == "file":
file_path = os.path.join(temp_dir_path, data["file"])
else:
file_path = None
ca.default_store.save_datasource(ds_name, source_type, source, file_path)
os.rmdir(temp_dir_path)
return ca.default_store.get_datasource(ds_name)
|
def put(self, name):
"""add new datasource"""
data = {}
def on_field(field):
print(f"\n\n{field}\n\n")
name = field.field_name.decode()
value = field.value.decode()
data[name] = value
def on_file(file):
data["file"] = file.file_name.decode()
temp_dir_path = tempfile.mkdtemp(prefix="datasource_file_")
if request.headers["Content-Type"].startswith("multipart/form-data"):
parser = multipart.create_form_parser(
headers=request.headers,
on_field=on_field,
on_file=on_file,
config={
"UPLOAD_DIR": temp_dir_path.encode(), # bytes required
"UPLOAD_KEEP_FILENAME": True,
"UPLOAD_KEEP_EXTENSIONS": True,
"MAX_MEMORY_FILE_SIZE": 0,
},
)
while True:
chunk = request.stream.read(8192)
if not chunk:
break
parser.write(chunk)
parser.finalize()
parser.close()
else:
data = request.json
ds_name = data["name"] if "name" in data else name
source = data["source"] if "source" in data else name
source_type = data["source_type"]
if source_type == "file":
file_path = os.path.join(temp_dir_path, data["file"])
else:
file_path = None
g.default_store.save_datasource(ds_name, source_type, source, file_path)
os.rmdir(temp_dir_path)
return g.default_store.get_datasource(ds_name)
|
https://github.com/mindsdb/mindsdb/issues/516
|
2020-06-25 14:30:32,318 - INFO - Starting MindsDB Mysql proxy server on tcp://127.0.0.1:47335
2020-06-25 14:30:32,334 - INFO - Waiting for incoming connections...
127.0.0.1 - - [25/Jun/2020 14:31:14] "GET /util/ping HTTP/1.1" 200 -
Exception on /datasources/ [GET]
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/marshalling.py", line 248, in wrapper
resp = f(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/mindsdb/api/http/namespaces/datasource.py", line 40, in get
return g.default_store.get_datasources()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: '_AppCtxGlobals' object has no attribute 'default_store'
127.0.0.1 - - [25/Jun/2020 14:31:18] "GET /datasources/ HTTP/1.1" 500 -
Exception on /datasources/ [GET]
|
AttributeError
|
def get(self, name):
ds = ca.default_store.get_datasource(name)
if ds is None:
print("No valid datasource given")
abort(400, "No valid datasource given")
analysis = ca.default_store.get_analysis(ds["source"])
return analysis, 200
|
def get(self, name):
ds = g.default_store.get_datasource(name)
if ds is None:
print("No valid datasource given")
abort(400, "No valid datasource given")
analysis = g.default_store.get_analysis(ds["source"])
return analysis, 200
|
https://github.com/mindsdb/mindsdb/issues/516
|
2020-06-25 14:30:32,318 - INFO - Starting MindsDB Mysql proxy server on tcp://127.0.0.1:47335
2020-06-25 14:30:32,334 - INFO - Waiting for incoming connections...
127.0.0.1 - - [25/Jun/2020 14:31:14] "GET /util/ping HTTP/1.1" 200 -
Exception on /datasources/ [GET]
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/marshalling.py", line 248, in wrapper
resp = f(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/mindsdb/api/http/namespaces/datasource.py", line 40, in get
return g.default_store.get_datasources()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: '_AppCtxGlobals' object has no attribute 'default_store'
127.0.0.1 - - [25/Jun/2020 14:31:18] "GET /datasources/ HTTP/1.1" 500 -
Exception on /datasources/ [GET]
|
AttributeError
|
def get(self, name):
ds = get_datasource(name)
if ds is None:
print("No valid datasource given")
abort(400, "No valid datasource given")
where = []
for key, value in request.args.items():
if key.startswith("filter"):
param = parse_filter(key, value)
if param is None:
abort(400, f'Not valid filter "{key}"')
where.append(param)
data_dict = ca.default_store.get_data(ds["name"], where)
if data_dict["rowcount"] == 0:
return abort(400, "Empty dataset after filters applying")
return get_analysis(pd.DataFrame(data_dict["data"])), 200
|
def get(self, name):
ds = get_datasource(name)
if ds is None:
print("No valid datasource given")
abort(400, "No valid datasource given")
where = []
for key, value in request.args.items():
if key.startswith("filter"):
param = parse_filter(key, value)
if param is None:
abort(400, f'Not valid filter "{key}"')
where.append(param)
data_dict = g.default_store.get_data(ds["name"], where)
if data_dict["rowcount"] == 0:
return abort(400, "Empty dataset after filters applying")
return get_analysis(pd.DataFrame(data_dict["data"])), 200
|
https://github.com/mindsdb/mindsdb/issues/516
|
2020-06-25 14:30:32,318 - INFO - Starting MindsDB Mysql proxy server on tcp://127.0.0.1:47335
2020-06-25 14:30:32,334 - INFO - Waiting for incoming connections...
127.0.0.1 - - [25/Jun/2020 14:31:14] "GET /util/ping HTTP/1.1" 200 -
Exception on /datasources/ [GET]
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/marshalling.py", line 248, in wrapper
resp = f(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/mindsdb/api/http/namespaces/datasource.py", line 40, in get
return g.default_store.get_datasources()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: '_AppCtxGlobals' object has no attribute 'default_store'
127.0.0.1 - - [25/Jun/2020 14:31:18] "GET /datasources/ HTTP/1.1" 500 -
Exception on /datasources/ [GET]
|
AttributeError
|
def get(self, name):
"""return data rows"""
ds = ca.default_store.get_datasource(name)
if ds is None:
abort(400, "No valid datasource given")
params = {"page[size]": None, "page[offset]": None}
where = []
for key, value in request.args.items():
if key == "page[size]":
params["page[size]"] = int(value)
if key == "page[offset]":
params["page[offset]"] = int(value)
elif key.startswith("filter"):
param = parse_filter(key, value)
if param is None:
abort(400, f'Not valid filter "{key}"')
where.append(param)
data_dict = ca.default_store.get_data(
name, where, params["page[size]"], params["page[offset]"]
)
return data_dict, 200
|
def get(self, name):
"""return data rows"""
ds = g.default_store.get_datasource(name)
if ds is None:
abort(400, "No valid datasource given")
params = {"page[size]": None, "page[offset]": None}
where = []
for key, value in request.args.items():
if key == "page[size]":
params["page[size]"] = int(value)
if key == "page[offset]":
params["page[offset]"] = int(value)
elif key.startswith("filter"):
param = parse_filter(key, value)
if param is None:
abort(400, f'Not valid filter "{key}"')
where.append(param)
data_dict = g.default_store.get_data(
name, where, params["page[size]"], params["page[offset]"]
)
return data_dict, 200
|
https://github.com/mindsdb/mindsdb/issues/516
|
2020-06-25 14:30:32,318 - INFO - Starting MindsDB Mysql proxy server on tcp://127.0.0.1:47335
2020-06-25 14:30:32,334 - INFO - Waiting for incoming connections...
127.0.0.1 - - [25/Jun/2020 14:31:14] "GET /util/ping HTTP/1.1" 200 -
Exception on /datasources/ [GET]
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/marshalling.py", line 248, in wrapper
resp = f(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/mindsdb/api/http/namespaces/datasource.py", line 40, in get
return g.default_store.get_datasources()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: '_AppCtxGlobals' object has no attribute 'default_store'
127.0.0.1 - - [25/Jun/2020 14:31:18] "GET /datasources/ HTTP/1.1" 500 -
Exception on /datasources/ [GET]
|
AttributeError
|
def get(self, name):
"""download uploaded file"""
ds = ca.default_store.get_datasource(name)
if not ds:
abort(404, "{} not found".format(name))
if not os.path.exists(ds["source"]):
abort(404, "{} not found".format(name))
return send_file(os.path.abspath(ds["source"]), as_attachment=True)
|
def get(self, name):
"""download uploaded file"""
ds = g.default_store.get_datasource(name)
if not ds:
abort(404, "{} not found".format(name))
if not os.path.exists(ds["source"]):
abort(404, "{} not found".format(name))
return send_file(os.path.abspath(ds["source"]), as_attachment=True)
|
https://github.com/mindsdb/mindsdb/issues/516
|
2020-06-25 14:30:32,318 - INFO - Starting MindsDB Mysql proxy server on tcp://127.0.0.1:47335
2020-06-25 14:30:32,334 - INFO - Waiting for incoming connections...
127.0.0.1 - - [25/Jun/2020 14:31:14] "GET /util/ping HTTP/1.1" 200 -
Exception on /datasources/ [GET]
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/marshalling.py", line 248, in wrapper
resp = f(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/mindsdb/api/http/namespaces/datasource.py", line 40, in get
return g.default_store.get_datasources()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: '_AppCtxGlobals' object has no attribute 'default_store'
127.0.0.1 - - [25/Jun/2020 14:31:18] "GET /datasources/ HTTP/1.1" 500 -
Exception on /datasources/ [GET]
|
AttributeError
|
def get(self):
"""List all predictors"""
return ca.mindsdb_native.get_models()
|
def get(self):
"""List all predictors"""
return g.mindsdb_native.get_models()
|
https://github.com/mindsdb/mindsdb/issues/516
|
2020-06-25 14:30:32,318 - INFO - Starting MindsDB Mysql proxy server on tcp://127.0.0.1:47335
2020-06-25 14:30:32,334 - INFO - Waiting for incoming connections...
127.0.0.1 - - [25/Jun/2020 14:31:14] "GET /util/ping HTTP/1.1" 200 -
Exception on /datasources/ [GET]
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/marshalling.py", line 248, in wrapper
resp = f(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/mindsdb/api/http/namespaces/datasource.py", line 40, in get
return g.default_store.get_datasources()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: '_AppCtxGlobals' object has no attribute 'default_store'
127.0.0.1 - - [25/Jun/2020 14:31:18] "GET /datasources/ HTTP/1.1" 500 -
Exception on /datasources/ [GET]
|
AttributeError
|
def get(self, name):
try:
model = ca.mindsdb_native.get_model_data(name)
except Exception as e:
abort(404, "")
for k in ["train_end_at", "updated_at", "created_at"]:
if k in model and model[k] is not None:
model[k] = parse_datetime(model[k])
return model
|
def get(self, name):
try:
model = g.mindsdb_native.get_model_data(name)
except Exception as e:
abort(404, "")
for k in ["train_end_at", "updated_at", "created_at"]:
if k in model and model[k] is not None:
model[k] = parse_datetime(model[k])
return model
|
https://github.com/mindsdb/mindsdb/issues/516
|
2020-06-25 14:30:32,318 - INFO - Starting MindsDB Mysql proxy server on tcp://127.0.0.1:47335
2020-06-25 14:30:32,334 - INFO - Waiting for incoming connections...
127.0.0.1 - - [25/Jun/2020 14:31:14] "GET /util/ping HTTP/1.1" 200 -
Exception on /datasources/ [GET]
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/marshalling.py", line 248, in wrapper
resp = f(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/mindsdb/api/http/namespaces/datasource.py", line 40, in get
return g.default_store.get_datasources()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: '_AppCtxGlobals' object has no attribute 'default_store'
127.0.0.1 - - [25/Jun/2020 14:31:18] "GET /datasources/ HTTP/1.1" 500 -
Exception on /datasources/ [GET]
|
AttributeError
|
def delete(self, name):
"""Remove predictor"""
ca.mindsdb_native.delete_model(name)
return "", 200
|
def delete(self, name):
"""Remove predictor"""
g.mindsdb_native.delete_model(name)
return "", 200
|
https://github.com/mindsdb/mindsdb/issues/516
|
2020-06-25 14:30:32,318 - INFO - Starting MindsDB Mysql proxy server on tcp://127.0.0.1:47335
2020-06-25 14:30:32,334 - INFO - Waiting for incoming connections...
127.0.0.1 - - [25/Jun/2020 14:31:14] "GET /util/ping HTTP/1.1" 200 -
Exception on /datasources/ [GET]
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/marshalling.py", line 248, in wrapper
resp = f(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/mindsdb/api/http/namespaces/datasource.py", line 40, in get
return g.default_store.get_datasources()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: '_AppCtxGlobals' object has no attribute 'default_store'
127.0.0.1 - - [25/Jun/2020 14:31:18] "GET /datasources/ HTTP/1.1" 500 -
Exception on /datasources/ [GET]
|
AttributeError
|
def put(self, name):
"""Learning new predictor"""
global model_swapping_map
data = request.json
to_predict = data.get("to_predict")
try:
kwargs = data.get("kwargs")
except:
kwargs = None
if type(kwargs) != type({}):
kwargs = {}
if "stop_training_in_x_seconds" not in kwargs:
kwargs["stop_training_in_x_seconds"] = 100
if "equal_accuracy_for_all_output_categories" not in kwargs:
kwargs["equal_accuracy_for_all_output_categories"] = True
if "sample_margin_of_error" not in kwargs:
kwargs["sample_margin_of_error"] = 0.005
if "unstable_parameters_dict" not in kwargs:
kwargs["unstable_parameters_dict"] = {}
if "use_selfaware_model" not in kwargs["unstable_parameters_dict"]:
kwargs["unstable_parameters_dict"]["use_selfaware_model"] = False
try:
retrain = data.get("retrain")
if retrain in ("true", "True"):
retrain = True
else:
retrain = False
except:
retrain = None
ds_name = (
data.get("data_source_name")
if data.get("data_source_name") is not None
else data.get("from_data")
)
from_data = ca.default_store.get_datasource_obj(ds_name)
if retrain is True:
original_name = name
name = name + "_retrained"
ca.mindsdb_native.learn(name, from_data, to_predict, kwargs)
if retrain is True:
try:
model_swapping_map[original_name] = True
ca.mindsdb_native.delete_model(original_name)
ca.mindsdb_native.rename_model(name, original_name)
model_swapping_map[original_name] = False
except:
model_swapping_map[original_name] = False
return "", 200
|
def put(self, name):
"""Learning new predictor"""
global model_swapping_map
data = request.json
to_predict = data.get("to_predict")
try:
kwargs = data.get("kwargs")
except:
kwargs = None
if type(kwargs) != type({}):
kwargs = {}
if "stop_training_in_x_seconds" not in kwargs:
kwargs["stop_training_in_x_seconds"] = 100
if "equal_accuracy_for_all_output_categories" not in kwargs:
kwargs["equal_accuracy_for_all_output_categories"] = True
if "sample_margin_of_error" not in kwargs:
kwargs["sample_margin_of_error"] = 0.005
if "unstable_parameters_dict" not in kwargs:
kwargs["unstable_parameters_dict"] = {}
if "use_selfaware_model" not in kwargs["unstable_parameters_dict"]:
kwargs["unstable_parameters_dict"]["use_selfaware_model"] = False
try:
retrain = data.get("retrain")
if retrain in ("true", "True"):
retrain = True
else:
retrain = False
except:
retrain = None
ds_name = (
data.get("data_source_name")
if data.get("data_source_name") is not None
else data.get("from_data")
)
from_data = g.default_store.get_datasource_obj(ds_name)
if retrain is True:
original_name = name
name = name + "_retrained"
g.mindsdb_native.learn(name, from_data, to_predict, kwargs)
if retrain is True:
try:
model_swapping_map[original_name] = True
g.mindsdb_native.delete_model(original_name)
g.mindsdb_native.rename_model(name, original_name)
model_swapping_map[original_name] = False
except:
model_swapping_map[original_name] = False
return "", 200
|
https://github.com/mindsdb/mindsdb/issues/516
|
2020-06-25 14:30:32,318 - INFO - Starting MindsDB Mysql proxy server on tcp://127.0.0.1:47335
2020-06-25 14:30:32,334 - INFO - Waiting for incoming connections...
127.0.0.1 - - [25/Jun/2020 14:31:14] "GET /util/ping HTTP/1.1" 200 -
Exception on /datasources/ [GET]
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/marshalling.py", line 248, in wrapper
resp = f(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/mindsdb/api/http/namespaces/datasource.py", line 40, in get
return g.default_store.get_datasources()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: '_AppCtxGlobals' object has no attribute 'default_store'
127.0.0.1 - - [25/Jun/2020 14:31:18] "GET /datasources/ HTTP/1.1" 500 -
Exception on /datasources/ [GET]
|
AttributeError
|
def get(self, name):
"""List of predictors colums"""
try:
model = ca.mindsdb_native.get_model_data(name)
except Exception:
abort(404, "Invalid predictor name")
columns = []
for array, is_target_array in [
(model["data_analysis"]["target_columns_metadata"], True),
(model["data_analysis"]["input_columns_metadata"], False),
]:
for col_data in array:
column = {
"name": col_data["column_name"],
"data_type": col_data["data_type"].lower(),
"is_target_column": is_target_array,
}
if column["data_type"] == "categorical":
column["distribution"] = col_data["data_distribution"][
"data_histogram"
]["x"]
columns.append(column)
return columns, 200
|
def get(self, name):
"""List of predictors colums"""
try:
model = g.mindsdb_native.get_model_data(name)
except Exception:
abort(404, "Invalid predictor name")
columns = []
for array, is_target_array in [
(model["data_analysis"]["target_columns_metadata"], True),
(model["data_analysis"]["input_columns_metadata"], False),
]:
for col_data in array:
column = {
"name": col_data["column_name"],
"data_type": col_data["data_type"].lower(),
"is_target_column": is_target_array,
}
if column["data_type"] == "categorical":
column["distribution"] = col_data["data_distribution"][
"data_histogram"
]["x"]
columns.append(column)
return columns, 200
|
https://github.com/mindsdb/mindsdb/issues/516
|
2020-06-25 14:30:32,318 - INFO - Starting MindsDB Mysql proxy server on tcp://127.0.0.1:47335
2020-06-25 14:30:32,334 - INFO - Waiting for incoming connections...
127.0.0.1 - - [25/Jun/2020 14:31:14] "GET /util/ping HTTP/1.1" 200 -
Exception on /datasources/ [GET]
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/marshalling.py", line 248, in wrapper
resp = f(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/mindsdb/api/http/namespaces/datasource.py", line 40, in get
return g.default_store.get_datasources()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: '_AppCtxGlobals' object has no attribute 'default_store'
127.0.0.1 - - [25/Jun/2020 14:31:18] "GET /datasources/ HTTP/1.1" 500 -
Exception on /datasources/ [GET]
|
AttributeError
|
def post(self, name):
"""Queries predictor"""
global model_swapping_map
data = request.json
when = data.get("when") or {}
try:
format_flag = data.get("format_flag")
except:
format_flag = "explain"
try:
kwargs = data.get("kwargs")
except:
kwargs = {}
if type(kwargs) != type({}):
kwargs = {}
# Not the fanciest semaphor, but should work since restplus is multi-threaded and this condition should rarely be reached
while name in model_swapping_map and model_swapping_map[name] is True:
time.sleep(1)
results = ca.mindsdb_native.predict(name, when=when, **kwargs)
# return '', 500
return preparse_results(results, format_flag)
|
def post(self, name):
"""Queries predictor"""
global model_swapping_map
data = request.json
when = data.get("when") or {}
try:
format_flag = data.get("format_flag")
except:
format_flag = "explain"
try:
kwargs = data.get("kwargs")
except:
kwargs = {}
if type(kwargs) != type({}):
kwargs = {}
# Not the fanciest semaphor, but should work since restplus is multi-threaded and this condition should rarely be reached
while name in model_swapping_map and model_swapping_map[name] is True:
time.sleep(1)
results = g.mindsdb_native.predict(name, when=when, **kwargs)
# return '', 500
return preparse_results(results, format_flag)
|
https://github.com/mindsdb/mindsdb/issues/516
|
2020-06-25 14:30:32,318 - INFO - Starting MindsDB Mysql proxy server on tcp://127.0.0.1:47335
2020-06-25 14:30:32,334 - INFO - Waiting for incoming connections...
127.0.0.1 - - [25/Jun/2020 14:31:14] "GET /util/ping HTTP/1.1" 200 -
Exception on /datasources/ [GET]
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/marshalling.py", line 248, in wrapper
resp = f(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/mindsdb/api/http/namespaces/datasource.py", line 40, in get
return g.default_store.get_datasources()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: '_AppCtxGlobals' object has no attribute 'default_store'
127.0.0.1 - - [25/Jun/2020 14:31:18] "GET /datasources/ HTTP/1.1" 500 -
Exception on /datasources/ [GET]
|
AttributeError
|
def post(self, name):
global model_swapping_map
data = request.json
from_data = ca.default_store.get_datasource_obj(data.get("data_source_name"))
try:
format_flag = data.get("format_flag")
except:
format_flag = "explain"
try:
kwargs = data.get("kwargs")
except:
kwargs = {}
if type(kwargs) != type({}):
kwargs = {}
if from_data is None:
from_data = data.get("from_data")
if from_data is None:
from_data = data.get("when_data")
if from_data is None:
abort(400, "No valid datasource given")
# Not the fanciest semaphor, but should work since restplus is multi-threaded and this condition should rarely be reached
while name in model_swapping_map and model_swapping_map[name] is True:
time.sleep(1)
results = ca.mindsdb_native.predict(name, when_data=from_data, **kwargs)
return preparse_results(results, format_flag)
|
def post(self, name):
global model_swapping_map
data = request.json
from_data = g.default_store.get_datasource_obj(data.get("data_source_name"))
try:
format_flag = data.get("format_flag")
except:
format_flag = "explain"
try:
kwargs = data.get("kwargs")
except:
kwargs = {}
if type(kwargs) != type({}):
kwargs = {}
if from_data is None:
from_data = data.get("from_data")
if from_data is None:
from_data = data.get("when_data")
if from_data is None:
abort(400, "No valid datasource given")
# Not the fanciest semaphor, but should work since restplus is multi-threaded and this condition should rarely be reached
while name in model_swapping_map and model_swapping_map[name] is True:
time.sleep(1)
results = g.mindsdb_native.predict(name, when_data=from_data, **kwargs)
return preparse_results(results, format_flag)
|
https://github.com/mindsdb/mindsdb/issues/516
|
2020-06-25 14:30:32,318 - INFO - Starting MindsDB Mysql proxy server on tcp://127.0.0.1:47335
2020-06-25 14:30:32,334 - INFO - Waiting for incoming connections...
127.0.0.1 - - [25/Jun/2020 14:31:14] "GET /util/ping HTTP/1.1" 200 -
Exception on /datasources/ [GET]
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/marshalling.py", line 248, in wrapper
resp = f(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/mindsdb/api/http/namespaces/datasource.py", line 40, in get
return g.default_store.get_datasources()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: '_AppCtxGlobals' object has no attribute 'default_store'
127.0.0.1 - - [25/Jun/2020 14:31:18] "GET /datasources/ HTTP/1.1" 500 -
Exception on /datasources/ [GET]
|
AttributeError
|
def post(self):
"""Upload existing predictor"""
predictor_file = request.files["file"]
# @TODO: Figure out how to remove
fpath = os.path.join(mindsdb.CONFIG.MINDSDB_TEMP_PATH, "new.zip")
with open(fpath, "wb") as f:
f.write(predictor_file.read())
ca.mindsdb_native.load_model(fpath)
try:
os.remove(fpath)
except Exception:
pass
return "", 200
|
def post(self):
"""Upload existing predictor"""
predictor_file = request.files["file"]
# @TODO: Figure out how to remove
fpath = os.path.join(mindsdb.CONFIG.MINDSDB_TEMP_PATH, "new.zip")
with open(fpath, "wb") as f:
f.write(predictor_file.read())
g.mindsdb_native.load_model(fpath)
try:
os.remove(fpath)
except Exception:
pass
return "", 200
|
https://github.com/mindsdb/mindsdb/issues/516
|
2020-06-25 14:30:32,318 - INFO - Starting MindsDB Mysql proxy server on tcp://127.0.0.1:47335
2020-06-25 14:30:32,334 - INFO - Waiting for incoming connections...
127.0.0.1 - - [25/Jun/2020 14:31:14] "GET /util/ping HTTP/1.1" 200 -
Exception on /datasources/ [GET]
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/marshalling.py", line 248, in wrapper
resp = f(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/mindsdb/api/http/namespaces/datasource.py", line 40, in get
return g.default_store.get_datasources()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: '_AppCtxGlobals' object has no attribute 'default_store'
127.0.0.1 - - [25/Jun/2020 14:31:18] "GET /datasources/ HTTP/1.1" 500 -
Exception on /datasources/ [GET]
|
AttributeError
|
def get(self, name):
"""Export predictor to file"""
try:
new_name = request.args.get("new_name")
ca.mindsdb_native.rename_model(name, new_name)
except Exception as e:
return str(e), 400
return f"Renamed model to {new_name}", 200
|
def get(self, name):
"""Export predictor to file"""
try:
new_name = request.args.get("new_name")
g.mindsdb_native.rename_model(name, new_name)
except Exception as e:
return str(e), 400
return f"Renamed model to {new_name}", 200
|
https://github.com/mindsdb/mindsdb/issues/516
|
2020-06-25 14:30:32,318 - INFO - Starting MindsDB Mysql proxy server on tcp://127.0.0.1:47335
2020-06-25 14:30:32,334 - INFO - Waiting for incoming connections...
127.0.0.1 - - [25/Jun/2020 14:31:14] "GET /util/ping HTTP/1.1" 200 -
Exception on /datasources/ [GET]
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/flask_restx/marshalling.py", line 248, in wrapper
resp = f(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/mindsdb/api/http/namespaces/datasource.py", line 40, in get
return g.default_store.get_datasources()
File "/home/zoran/MyProjects/mindsdb-examples/new/lib/python3.7/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: '_AppCtxGlobals' object has no attribute 'default_store'
127.0.0.1 - - [25/Jun/2020 14:31:18] "GET /datasources/ HTTP/1.1" 500 -
Exception on /datasources/ [GET]
|
AttributeError
|
def configure(config):
"""
| name | example | purpose |
| ---- | ------- | ------- |
| enabled\\_by\\_default | True | Enable URL safety in all channels where it isn't explicitly disabled. |
| known\\_good | sopel.chat,dftba.net | List of "known good" domains to ignore. |
| vt\\_api\\_key | 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef | Optional VirusTotal API key to improve malicious URL detection |
"""
config.define_section("safety", SafetySection)
config.safety.configure_setting(
"enabled_by_default",
"Enable URL safety in channels that don't specifically disable it?",
)
config.safety.configure_setting(
"known_good",
"Enter any domains to whitelist",
)
config.safety.configure_setting(
"vt_api_key",
"Optionally, enter a VirusTotal API key to improve malicious URL "
"protection.\nOtherwise, only the StevenBlack list will be used.",
)
|
def configure(config):
"""
| name | example | purpose |
| ---- | ------- | ------- |
| enabled\\_by\\_default | True | Enable URL safety in all channels where it isn't explicitly disabled. |
| known\\_good | sopel.chat,dftba.net | List of "known good" domains to ignore. |
| vt\\_api\\_key | 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef | Optional VirusTotal API key to improve malicious URL detection |
"""
config.define_section("safety", SafetySection)
config.safety.configure_setting(
"enabled_by_default",
"Enable URL safety in channels that don't specifically disable it?",
)
config.safety.configure_setting(
"known_good",
"Enter any domains to whitelist",
)
config.safety.configure_setting(
"vt_api_key",
"Optionally, enter a VirusTotal API key to improve malicious URL "
"protection.\nOtherwise, only the Malwarebytes DB will be used.",
)
|
https://github.com/sopel-irc/sopel/issues/2008
|
[2020-12-22 13:35:56,136] sopel.modules.safety INFO - Downloading malwaredomains db from https://mirror1.malwaredomains.com/files/justdomains
[2020-12-22 13:35:56,332] sopel.bot ERROR - Error in safety setup: HTTP Error 404: Not Found
Traceback (most recent call last):
File "/mnt/c/Users/dgw/github/sopel/sopel/bot.py", line 310, in setup_plugins
plugin.setup(self)
File "/mnt/c/Users/dgw/github/sopel/sopel/plugins/handlers.py", line 265, in setup
self._module.setup(bot)
File "/mnt/c/Users/dgw/github/sopel/sopel/modules/safety.py", line 98, in setup
_download_malwaredomains_db(loc)
File "/mnt/c/Users/dgw/github/sopel/sopel/modules/safety.py", line 116, in _download_malwaredomains_db
urlretrieve(url, path)
File "/usr/lib/python3.6/urllib/request.py", line 248, in urlretrieve
with contextlib.closing(urlopen(url, data)) as fp:
File "/usr/lib/python3.6/urllib/request.py", line 223, in urlopen
return opener.open(url, data, timeout)
File "/usr/lib/python3.6/urllib/request.py", line 532, in open
response = meth(req, response)
File "/usr/lib/python3.6/urllib/request.py", line 642, in http_response
'http', request, response, code, msg, hdrs)
File "/usr/lib/python3.6/urllib/request.py", line 570, in error
return self._call_chain(*args)
File "/usr/lib/python3.6/urllib/request.py", line 504, in _call_chain
result = func(*args)
File "/usr/lib/python3.6/urllib/request.py", line 650, in http_error_default
raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 404: Not Found
|
urllib.error.HTTPError
|
def setup(bot):
bot.config.define_section("safety", SafetySection)
if "safety_cache" not in bot.memory:
bot.memory["safety_cache"] = tools.SopelMemory()
if "safety_cache_lock" not in bot.memory:
bot.memory["safety_cache_lock"] = threading.Lock()
for item in bot.config.safety.known_good:
known_good.append(re.compile(item, re.I))
old_file = os.path.join(bot.config.homedir, "malwaredomains.txt")
if os.path.exists(old_file) and os.path.isfile(old_file):
LOGGER.info("Removing old malwaredomains file from %s", old_file)
try:
os.remove(old_file)
except Exception as err:
# for lack of a more specific error type...
# Python on Windows throws an exception if the file is in use
LOGGER.info("Could not delete %s: %s", old_file, str(err))
loc = os.path.join(bot.config.homedir, "unsafedomains.txt")
if os.path.isfile(loc):
if os.path.getmtime(loc) < time.time() - 24 * 60 * 60:
# File exists but older than one day — update it
_download_domain_list(loc)
else:
_download_domain_list(loc)
with open(loc, "r") as f:
for line in f:
clean_line = unicode(line).strip().lower()
if not clean_line or clean_line[0] == "#":
# blank line or comment
continue
parts = clean_line.split(" ", 1)
try:
domain = parts[1]
except IndexError:
# line does not contain a hosts entry; skip it
continue
if "." in domain:
# only publicly routable domains matter; skip loopback/link-local stuff
malware_domains.add(domain)
|
def setup(bot):
bot.config.define_section("safety", SafetySection)
if "safety_cache" not in bot.memory:
bot.memory["safety_cache"] = tools.SopelMemory()
if "safety_cache_lock" not in bot.memory:
bot.memory["safety_cache_lock"] = threading.Lock()
for item in bot.config.safety.known_good:
known_good.append(re.compile(item, re.I))
loc = os.path.join(bot.config.homedir, "malwaredomains.txt")
if os.path.isfile(loc):
if os.path.getmtime(loc) < time.time() - 24 * 60 * 60 * 7:
# File exists but older than one week — update it
_download_malwaredomains_db(loc)
else:
_download_malwaredomains_db(loc)
with open(loc, "r") as f:
for line in f:
clean_line = unicode(line).strip().lower()
if clean_line != "":
malware_domains.add(clean_line)
|
https://github.com/sopel-irc/sopel/issues/2008
|
[2020-12-22 13:35:56,136] sopel.modules.safety INFO - Downloading malwaredomains db from https://mirror1.malwaredomains.com/files/justdomains
[2020-12-22 13:35:56,332] sopel.bot ERROR - Error in safety setup: HTTP Error 404: Not Found
Traceback (most recent call last):
File "/mnt/c/Users/dgw/github/sopel/sopel/bot.py", line 310, in setup_plugins
plugin.setup(self)
File "/mnt/c/Users/dgw/github/sopel/sopel/plugins/handlers.py", line 265, in setup
self._module.setup(bot)
File "/mnt/c/Users/dgw/github/sopel/sopel/modules/safety.py", line 98, in setup
_download_malwaredomains_db(loc)
File "/mnt/c/Users/dgw/github/sopel/sopel/modules/safety.py", line 116, in _download_malwaredomains_db
urlretrieve(url, path)
File "/usr/lib/python3.6/urllib/request.py", line 248, in urlretrieve
with contextlib.closing(urlopen(url, data)) as fp:
File "/usr/lib/python3.6/urllib/request.py", line 223, in urlopen
return opener.open(url, data, timeout)
File "/usr/lib/python3.6/urllib/request.py", line 532, in open
response = meth(req, response)
File "/usr/lib/python3.6/urllib/request.py", line 642, in http_response
'http', request, response, code, msg, hdrs)
File "/usr/lib/python3.6/urllib/request.py", line 570, in error
return self._call_chain(*args)
File "/usr/lib/python3.6/urllib/request.py", line 504, in _call_chain
result = func(*args)
File "/usr/lib/python3.6/urllib/request.py", line 650, in http_error_default
raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 404: Not Found
|
urllib.error.HTTPError
|
def url_handler(bot, trigger):
"""Checks for malicious URLs"""
check = True # Enable URL checking
strict = False # Strict mode: kick on malicious URL
positives = 0 # Number of engines saying it's malicious
total = 0 # Number of total engines
use_vt = True # Use VirusTotal
check = bot.config.safety.enabled_by_default
if check is None:
# If not set, assume default
check = True
# DB overrides config:
setting = bot.db.get_channel_value(trigger.sender, "safety")
if setting is not None:
if setting == "off":
return # Not checking
elif setting in ["on", "strict", "local", "local strict"]:
check = True
if setting == "strict" or setting == "local strict":
strict = True
if setting == "local" or setting == "local strict":
use_vt = False
if not check:
return # Not overridden by DB, configured default off
try:
netloc = urlparse(trigger.group(1)).netloc
except ValueError:
return # Invalid IPv6 URL
if any(regex.search(netloc) for regex in known_good):
return # Whitelisted
apikey = bot.config.safety.vt_api_key
try:
if apikey is not None and use_vt:
payload = {"resource": unicode(trigger), "apikey": apikey, "scan": "1"}
if trigger not in bot.memory["safety_cache"]:
r = requests.post(vt_base_api_url + "report", data=payload)
r.raise_for_status()
result = r.json()
fetched = time.time()
if all(k in result for k in ["positives", "total"]):
# cache result only if it contains a scan report
# TODO: handle checking back for results from queued scans
data = {
"positives": result["positives"],
"total": result["total"],
"fetched": fetched,
}
bot.memory["safety_cache"][trigger] = data
if len(bot.memory["safety_cache"]) >= (2 * cache_limit):
_clean_cache(bot)
else:
LOGGER.debug("using cache")
result = bot.memory["safety_cache"][trigger]
positives = result.get("positives", 0)
total = result.get("total", 0)
except requests.exceptions.RequestException:
# Ignoring exceptions with VT so domain list will always work
LOGGER.debug("[VirusTotal] Error obtaining response.", exc_info=True)
except InvalidJSONResponse:
# Ignoring exceptions with VT so domain list will always work
LOGGER.debug("[VirusTotal] Malformed response (invalid JSON).", exc_info=True)
if unicode(netloc).lower() in malware_domains:
positives += 1
total += 1
if positives >= 1:
# Possibly malicious URL detected!
confidence = "{}%".format(round((positives / total) * 100))
msg = "link posted by %s is possibly malicious " % formatting.bold(trigger.nick)
msg += "(confidence %s - %s/%s)" % (confidence, positives, total)
warning = formatting.bold(formatting.color("WARNING:", "red"))
bot.say(warning + " " + msg)
if strict:
bot.kick(trigger.nick, trigger.sender, "Posted a malicious link")
|
def url_handler(bot, trigger):
"""Checks for malicious URLs"""
check = True # Enable URL checking
strict = False # Strict mode: kick on malicious URL
positives = 0 # Number of engines saying it's malicious
total = 0 # Number of total engines
use_vt = True # Use VirusTotal
check = bot.config.safety.enabled_by_default
if check is None:
# If not set, assume default
check = True
# DB overrides config:
setting = bot.db.get_channel_value(trigger.sender, "safety")
if setting is not None:
if setting == "off":
return # Not checking
elif setting in ["on", "strict", "local", "local strict"]:
check = True
if setting == "strict" or setting == "local strict":
strict = True
if setting == "local" or setting == "local strict":
use_vt = False
if not check:
return # Not overridden by DB, configured default off
try:
netloc = urlparse(trigger.group(1)).netloc
except ValueError:
return # Invalid IPv6 URL
if any(regex.search(netloc) for regex in known_good):
return # Whitelisted
apikey = bot.config.safety.vt_api_key
try:
if apikey is not None and use_vt:
payload = {"resource": unicode(trigger), "apikey": apikey, "scan": "1"}
if trigger not in bot.memory["safety_cache"]:
r = requests.post(vt_base_api_url + "report", data=payload)
r.raise_for_status()
result = r.json()
fetched = time.time()
if all(k in result for k in ["positives", "total"]):
# cache result only if it contains a scan report
# TODO: handle checking back for results from queued scans
data = {
"positives": result["positives"],
"total": result["total"],
"fetched": fetched,
}
bot.memory["safety_cache"][trigger] = data
if len(bot.memory["safety_cache"]) >= (2 * cache_limit):
_clean_cache(bot)
else:
LOGGER.debug("using cache")
result = bot.memory["safety_cache"][trigger]
positives = result.get("positives", 0)
total = result.get("total", 0)
except requests.exceptions.RequestException:
# Ignoring exceptions with VT so MalwareDomains will always work
LOGGER.debug("[VirusTotal] Error obtaining response.", exc_info=True)
except InvalidJSONResponse:
# Ignoring exceptions with VT so MalwareDomains will always work
LOGGER.debug("[VirusTotal] Malformed response (invalid JSON).", exc_info=True)
if unicode(netloc).lower() in malware_domains:
# malwaredomains is more trustworthy than some VT engines
# therefore it gets a weight of 10 engines when calculating confidence
positives += 10
total += 10
if positives > 1:
# Possibly malicious URL detected!
confidence = "{}%".format(round((positives / total) * 100))
msg = "link posted by %s is possibly malicious " % formatting.bold(trigger.nick)
msg += "(confidence %s - %s/%s)" % (confidence, positives, total)
warning = formatting.bold(formatting.color("WARNING:", "red"))
bot.say(warning + " " + msg)
if strict:
bot.kick(trigger.nick, trigger.sender, "Posted a malicious link")
|
https://github.com/sopel-irc/sopel/issues/2008
|
[2020-12-22 13:35:56,136] sopel.modules.safety INFO - Downloading malwaredomains db from https://mirror1.malwaredomains.com/files/justdomains
[2020-12-22 13:35:56,332] sopel.bot ERROR - Error in safety setup: HTTP Error 404: Not Found
Traceback (most recent call last):
File "/mnt/c/Users/dgw/github/sopel/sopel/bot.py", line 310, in setup_plugins
plugin.setup(self)
File "/mnt/c/Users/dgw/github/sopel/sopel/plugins/handlers.py", line 265, in setup
self._module.setup(bot)
File "/mnt/c/Users/dgw/github/sopel/sopel/modules/safety.py", line 98, in setup
_download_malwaredomains_db(loc)
File "/mnt/c/Users/dgw/github/sopel/sopel/modules/safety.py", line 116, in _download_malwaredomains_db
urlretrieve(url, path)
File "/usr/lib/python3.6/urllib/request.py", line 248, in urlretrieve
with contextlib.closing(urlopen(url, data)) as fp:
File "/usr/lib/python3.6/urllib/request.py", line 223, in urlopen
return opener.open(url, data, timeout)
File "/usr/lib/python3.6/urllib/request.py", line 532, in open
response = meth(req, response)
File "/usr/lib/python3.6/urllib/request.py", line 642, in http_response
'http', request, response, code, msg, hdrs)
File "/usr/lib/python3.6/urllib/request.py", line 570, in error
return self._call_chain(*args)
File "/usr/lib/python3.6/urllib/request.py", line 504, in _call_chain
result = func(*args)
File "/usr/lib/python3.6/urllib/request.py", line 650, in http_error_default
raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 404: Not Found
|
urllib.error.HTTPError
|
def url_handler(bot, trigger):
"""Checks for malicious URLs"""
check = True # Enable URL checking
strict = False # Strict mode: kick on malicious URL
positives = 0 # Number of engines saying it's malicious
total = 0 # Number of total engines
use_vt = True # Use VirusTotal
check = bot.config.safety.enabled_by_default
if check is None:
# If not set, assume default
check = True
# DB overrides config:
setting = bot.db.get_channel_value(trigger.sender, "safety")
if setting is not None:
if setting == "off":
return # Not checking
elif setting in ["on", "strict", "local", "local strict"]:
check = True
if setting == "strict" or setting == "local strict":
strict = True
if setting == "local" or setting == "local strict":
use_vt = False
if not check:
return # Not overridden by DB, configured default off
try:
netloc = urlparse(trigger.group(1)).netloc
except ValueError:
return # Invalid IPv6 URL
if any(regex.search(netloc) for regex in known_good):
return # Whitelisted
apikey = bot.config.safety.vt_api_key
try:
if apikey is not None and use_vt:
payload = {"resource": unicode(trigger), "apikey": apikey, "scan": "1"}
if trigger not in bot.memory["safety_cache"]:
r = requests.post(vt_base_api_url + "report", data=payload)
r.raise_for_status()
result = r.json()
fetched = time.time()
if all(k in result for k in ["positives", "total"]):
# cache result only if it contains a scan report
# TODO: handle checking back for results from queued scans
data = {
"positives": result["positives"],
"total": result["total"],
"fetched": fetched,
}
bot.memory["safety_cache"][trigger] = data
if len(bot.memory["safety_cache"]) >= (2 * cache_limit):
_clean_cache(bot)
else:
print("using cache")
result = bot.memory["safety_cache"][trigger]
positives = result.get("positives", 0)
total = result.get("total", 0)
except requests.exceptions.RequestException:
# Ignoring exceptions with VT so MalwareDomains will always work
LOGGER.debug("[VirusTotal] Error obtaining response.", exc_info=True)
except InvalidJSONResponse:
# Ignoring exceptions with VT so MalwareDomains will always work
LOGGER.debug("[VirusTotal] Malformed response (invalid JSON).", exc_info=True)
if unicode(netloc).lower() in malware_domains:
# malwaredomains is more trustworthy than some VT engines
# therefore it gets a weight of 10 engines when calculating confidence
positives += 10
total += 10
if positives > 1:
# Possibly malicious URL detected!
confidence = "{}%".format(round((positives / total) * 100))
msg = "link posted by %s is possibly malicious " % bold(trigger.nick)
msg += "(confidence %s - %s/%s)" % (confidence, positives, total)
bot.say("[" + bold(color("WARNING", "red")) + "] " + msg)
if strict:
bot.kick(trigger.nick, trigger.sender, "Posted a malicious link")
|
def url_handler(bot, trigger):
"""Checks for malicious URLs"""
check = True # Enable URL checking
strict = False # Strict mode: kick on malicious URL
positives = 0 # Number of engines saying it's malicious
total = 0 # Number of total engines
use_vt = True # Use VirusTotal
check = bot.config.safety.enabled_by_default
if check is None:
# If not set, assume default
check = True
# DB overrides config:
setting = bot.db.get_channel_value(trigger.sender, "safety")
if setting is not None:
if setting == "off":
return # Not checking
elif setting in ["on", "strict", "local", "local strict"]:
check = True
if setting == "strict" or setting == "local strict":
strict = True
if setting == "local" or setting == "local strict":
use_vt = False
if not check:
return # Not overridden by DB, configured default off
try:
netloc = urlparse(trigger.group(1)).netloc
except ValueError:
return # Invalid IPv6 URL
if any(regex.search(netloc) for regex in known_good):
return # Whitelisted
apikey = bot.config.safety.vt_api_key
try:
if apikey is not None and use_vt:
payload = {"resource": unicode(trigger), "apikey": apikey, "scan": "1"}
if trigger not in bot.memory["safety_cache"]:
r = requests.post(vt_base_api_url + "report", data=payload)
r.raise_for_status()
result = r.json()
fetched = time.time()
data = {
"positives": result["positives"],
"total": result["total"],
"fetched": fetched,
}
bot.memory["safety_cache"][trigger] = data
if len(bot.memory["safety_cache"]) >= (2 * cache_limit):
_clean_cache(bot)
else:
print("using cache")
result = bot.memory["safety_cache"][trigger]
positives = result["positives"]
total = result["total"]
except requests.exceptions.RequestException:
# Ignoring exceptions with VT so MalwareDomains will always work
LOGGER.debug("[VirusTotal] Error obtaining response.", exc_info=True)
except InvalidJSONResponse:
# Ignoring exceptions with VT so MalwareDomains will always work
LOGGER.debug("[VirusTotal] Malformed response (invalid JSON).", exc_info=True)
if unicode(netloc).lower() in malware_domains:
# malwaredomains is more trustworthy than some VT engines
# therefore it gets a weight of 10 engines when calculating confidence
positives += 10
total += 10
if positives > 1:
# Possibly malicious URL detected!
confidence = "{}%".format(round((positives / total) * 100))
msg = "link posted by %s is possibly malicious " % bold(trigger.nick)
msg += "(confidence %s - %s/%s)" % (confidence, positives, total)
bot.say("[" + bold(color("WARNING", "red")) + "] " + msg)
if strict:
bot.kick(trigger.nick, trigger.sender, "Posted a malicious link")
|
https://github.com/sopel-irc/sopel/issues/1850
|
[2020-04-21 21:47:29,752] sopel.bot ERROR - Unexpected error ('positives') from Ad at 2020-04-21 21:47:29.752477. Message was: https://www.cnn.com/2020/04/21/uk/uk-death-toll-coronavirus-statistics-gbr-intl/index.html
Traceback (most recent call last):
File "/home/sopel/.local/lib/python3.8/site-packages/sopel-7.0.0-py3.8.egg/sopel/bot.py", line 590, in call
exit_code = func(sopel, trigger)
File "/home/sopel/.local/lib/python3.8/site-packages/sopel-7.0.0-py3.8.egg/sopel/modules/safety.py", line 166, in url_handler
data = {'positives': result['positives'],
KeyError: 'positives'
|
KeyError
|
def __init__(self, config):
# MySQL - mysql://username:password@localhost/db
# SQLite - sqlite:////home/sopel/.sopel/default.db
db_type = config.core.db_type
# Handle SQLite explicitly as a default
if db_type == "sqlite":
path = config.core.db_filename
config_dir, config_file = os.path.split(config.filename)
config_name, _ = os.path.splitext(config_file)
if path is None:
path = os.path.join(config_dir, config_name + ".db")
path = os.path.expanduser(path)
if not os.path.isabs(path):
path = os.path.normpath(os.path.join(config_dir, path))
self.filename = path
self.url = "sqlite:///%s" % path
# Otherwise, handle all other database engines
else:
if db_type == "mysql":
drivername = config.core.db_driver or "mysql"
elif db_type == "postgres":
drivername = config.core.db_driver or "postgresql"
elif db_type == "oracle":
drivername = config.core.db_driver or "oracle"
elif db_type == "mssql":
drivername = config.core.db_driver or "mssql+pymssql"
elif db_type == "firebird":
drivername = config.core.db_driver or "firebird+fdb"
elif db_type == "sybase":
drivername = config.core.db_driver or "sybase+pysybase"
else:
raise Exception("Unknown db_type")
db_user = config.core.db_user
db_pass = config.core.db_pass
db_host = config.core.db_host
db_port = config.core.db_port # Optional
db_name = config.core.db_name # Optional, depending on DB
# Ensure we have all our variables defined
if db_user is None or db_pass is None or db_host is None:
raise Exception(
"Please make sure the following core "
"configuration values are defined: "
"db_user, db_pass, db_host"
)
self.url = URL(
drivername=drivername,
username=db_user,
password=db_pass,
host=db_host,
port=db_port,
database=db_name,
)
self.engine = create_engine(self.url)
# Catch any errors connecting to database
try:
self.engine.connect()
except OperationalError:
print("OperationalError: Unable to connect to database.")
raise
# Create our tables
BASE.metadata.create_all(self.engine)
self.ssession = scoped_session(sessionmaker(bind=self.engine))
|
def __init__(self, config):
path = config.core.db_filename
config_dir, config_file = os.path.split(config.filename)
config_name, _ = os.path.splitext(config_file)
if path is None:
path = os.path.join(config_dir, config_name + ".db")
path = os.path.expanduser(path)
if not os.path.isabs(path):
path = os.path.normpath(os.path.join(config_dir, path))
self.filename = path
self._create()
|
https://github.com/sopel-irc/sopel/issues/736
|
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/seen.py", line 50, in note
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
File "/home/TrollOP/willie/willie/db.py", line 150, in set_nick_value
nick_id = self.get_nick_id(nick)
File "/home/TrollOP/willie/willie/db.py", line 114, in get_nick_id
[slug]).fetchone()
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/weather.py", line 129, in weather
woeid = bot.db.get_nick_value(trigger.nick, 'woeid')
File "/home/TrollOP/willie/willie/db.py", line 161, in get_nick_value
[nick.lower(), key]
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/seen.py", line 50, in note
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
File "/home/TrollOP/willie/willie/db.py", line 152, in set_nick_value
[nick_id, key, value])
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
|
OperationalError
|
def connect(self):
"""Return a raw database connection object."""
return self.engine.connect()
|
def connect(self):
"""Return a raw database connection object."""
return sqlite3.connect(self.filename, timeout=10)
|
https://github.com/sopel-irc/sopel/issues/736
|
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/seen.py", line 50, in note
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
File "/home/TrollOP/willie/willie/db.py", line 150, in set_nick_value
nick_id = self.get_nick_id(nick)
File "/home/TrollOP/willie/willie/db.py", line 114, in get_nick_id
[slug]).fetchone()
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/weather.py", line 129, in weather
woeid = bot.db.get_nick_value(trigger.nick, 'woeid')
File "/home/TrollOP/willie/willie/db.py", line 161, in get_nick_value
[nick.lower(), key]
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/seen.py", line 50, in note
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
File "/home/TrollOP/willie/willie/db.py", line 152, in set_nick_value
[nick_id, key, value])
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
|
OperationalError
|
def execute(self, *args, **kwargs):
"""Execute an arbitrary SQL query against the database.
Returns a cursor object, on which things like `.fetchall()` can be
called per PEP 249."""
with self.connect() as conn:
return conn.execute(*args, **kwargs)
|
def execute(self, *args, **kwargs):
"""Execute an arbitrary SQL query against the database.
Returns a cursor object, on which things like `.fetchall()` can be
called per PEP 249."""
with self.connect() as conn:
cur = conn.cursor()
return cur.execute(*args, **kwargs)
|
https://github.com/sopel-irc/sopel/issues/736
|
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/seen.py", line 50, in note
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
File "/home/TrollOP/willie/willie/db.py", line 150, in set_nick_value
nick_id = self.get_nick_id(nick)
File "/home/TrollOP/willie/willie/db.py", line 114, in get_nick_id
[slug]).fetchone()
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/weather.py", line 129, in weather
woeid = bot.db.get_nick_value(trigger.nick, 'woeid')
File "/home/TrollOP/willie/willie/db.py", line 161, in get_nick_value
[nick.lower(), key]
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/seen.py", line 50, in note
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
File "/home/TrollOP/willie/willie/db.py", line 152, in set_nick_value
[nick_id, key, value])
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
|
OperationalError
|
def get_nick_id(self, nick, create=True):
"""Return the internal identifier for a given nick.
This identifier is unique to a user, and shared across all of that
user's aliases. If create is True, a new ID will be created if one does
not already exist"""
session = self.ssession()
slug = nick.lower()
try:
nickname = session.query(Nicknames).filter(Nicknames.slug == slug).one_or_none()
if nickname is None:
if not create:
raise ValueError("No ID exists for the given nick")
# Generate a new ID
nick_id = NickIDs()
session.add(nick_id)
session.commit()
# Create a new Nickname
nickname = Nicknames(nick_id=nick_id.nick_id, slug=slug, canonical=nick)
session.add(nickname)
session.commit()
return nickname.nick_id
except SQLAlchemyError:
session.rollback()
raise
finally:
session.close()
|
def get_nick_id(self, nick, create=True):
"""Return the internal identifier for a given nick.
This identifier is unique to a user, and shared across all of that
user's aliases. If create is True, a new ID will be created if one does
not already exist"""
slug = nick.lower()
nick_id = self.execute(
"SELECT nick_id from nicknames where slug = ?", [slug]
).fetchone()
if nick_id is None:
if not create:
raise ValueError("No ID exists for the given nick")
with self.connect() as conn:
cur = conn.cursor()
cur.execute("INSERT INTO nick_ids VALUES (NULL)")
nick_id = cur.execute("SELECT last_insert_rowid()").fetchone()[0]
cur.execute(
"INSERT INTO nicknames (nick_id, slug, canonical) VALUES (?, ?, ?)",
[nick_id, slug, nick],
)
nick_id = self.execute(
"SELECT nick_id from nicknames where slug = ?", [slug]
).fetchone()
return nick_id[0]
|
https://github.com/sopel-irc/sopel/issues/736
|
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/seen.py", line 50, in note
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
File "/home/TrollOP/willie/willie/db.py", line 150, in set_nick_value
nick_id = self.get_nick_id(nick)
File "/home/TrollOP/willie/willie/db.py", line 114, in get_nick_id
[slug]).fetchone()
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/weather.py", line 129, in weather
woeid = bot.db.get_nick_value(trigger.nick, 'woeid')
File "/home/TrollOP/willie/willie/db.py", line 161, in get_nick_value
[nick.lower(), key]
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/seen.py", line 50, in note
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
File "/home/TrollOP/willie/willie/db.py", line 152, in set_nick_value
[nick_id, key, value])
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
|
OperationalError
|
def alias_nick(self, nick, alias):
"""Create an alias for a nick.
Raises ValueError if the alias already exists. If nick does not already
exist, it will be added along with the alias."""
nick = Identifier(nick)
alias = Identifier(alias)
nick_id = self.get_nick_id(nick)
session = self.ssession()
try:
result = (
session.query(Nicknames)
.filter(Nicknames.slug == alias.lower())
.filter(Nicknames.canonical == alias)
.one_or_none()
)
if result:
raise ValueError("Given alias is the only entry in its group.")
nickname = Nicknames(nick_id=nick_id, slug=alias.lower(), canonical=alias)
session.add(nickname)
session.commit()
except SQLAlchemyError:
session.rollback()
raise
finally:
session.close()
|
def alias_nick(self, nick, alias):
"""Create an alias for a nick.
Raises ValueError if the alias already exists. If nick does not already
exist, it will be added along with the alias."""
nick = Identifier(nick)
alias = Identifier(alias)
nick_id = self.get_nick_id(nick)
sql = "INSERT INTO nicknames (nick_id, slug, canonical) VALUES (?, ?, ?)"
values = [nick_id, alias.lower(), alias]
try:
self.execute(sql, values)
except sqlite3.IntegrityError:
raise ValueError("Alias already exists.")
|
https://github.com/sopel-irc/sopel/issues/736
|
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/seen.py", line 50, in note
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
File "/home/TrollOP/willie/willie/db.py", line 150, in set_nick_value
nick_id = self.get_nick_id(nick)
File "/home/TrollOP/willie/willie/db.py", line 114, in get_nick_id
[slug]).fetchone()
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/weather.py", line 129, in weather
woeid = bot.db.get_nick_value(trigger.nick, 'woeid')
File "/home/TrollOP/willie/willie/db.py", line 161, in get_nick_value
[nick.lower(), key]
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/seen.py", line 50, in note
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
File "/home/TrollOP/willie/willie/db.py", line 152, in set_nick_value
[nick_id, key, value])
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
|
OperationalError
|
def set_nick_value(self, nick, key, value):
"""Sets the value for a given key to be associated with the nick."""
nick = Identifier(nick)
value = json.dumps(value, ensure_ascii=False)
nick_id = self.get_nick_id(nick)
session = self.ssession()
try:
result = (
session.query(NickValues)
.filter(NickValues.nick_id == nick_id)
.filter(NickValues.key == key)
.one_or_none()
)
# NickValue exists, update
if result:
result.value = value
session.commit()
# DNE - Insert
else:
new_nickvalue = NickValues(nick_id=nick_id, key=key, value=value)
session.add(new_nickvalue)
session.commit()
except SQLAlchemyError:
session.rollback()
raise
finally:
session.close()
|
def set_nick_value(self, nick, key, value):
"""Sets the value for a given key to be associated with the nick."""
nick = Identifier(nick)
value = json.dumps(value, ensure_ascii=False)
nick_id = self.get_nick_id(nick)
self.execute(
"INSERT OR REPLACE INTO nick_values VALUES (?, ?, ?)", [nick_id, key, value]
)
|
https://github.com/sopel-irc/sopel/issues/736
|
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/seen.py", line 50, in note
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
File "/home/TrollOP/willie/willie/db.py", line 150, in set_nick_value
nick_id = self.get_nick_id(nick)
File "/home/TrollOP/willie/willie/db.py", line 114, in get_nick_id
[slug]).fetchone()
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/weather.py", line 129, in weather
woeid = bot.db.get_nick_value(trigger.nick, 'woeid')
File "/home/TrollOP/willie/willie/db.py", line 161, in get_nick_value
[nick.lower(), key]
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/seen.py", line 50, in note
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
File "/home/TrollOP/willie/willie/db.py", line 152, in set_nick_value
[nick_id, key, value])
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
|
OperationalError
|
def get_nick_value(self, nick, key):
"""Retrieves the value for a given key associated with a nick."""
nick = Identifier(nick)
session = self.ssession()
try:
result = (
session.query(NickValues)
.filter(Nicknames.nick_id == NickValues.nick_id)
.filter(Nicknames.slug == nick.lower())
.filter(NickValues.key == key)
.one_or_none()
)
if result is not None:
result = result.value
return _deserialize(result)
except SQLAlchemyError:
session.rollback()
raise
finally:
session.close()
|
def get_nick_value(self, nick, key):
"""Retrieves the value for a given key associated with a nick."""
nick = Identifier(nick)
result = self.execute(
"SELECT value FROM nicknames JOIN nick_values "
"ON nicknames.nick_id = nick_values.nick_id "
"WHERE slug = ? AND key = ?",
[nick.lower(), key],
).fetchone()
if result is not None:
result = result[0]
return _deserialize(result)
|
https://github.com/sopel-irc/sopel/issues/736
|
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/seen.py", line 50, in note
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
File "/home/TrollOP/willie/willie/db.py", line 150, in set_nick_value
nick_id = self.get_nick_id(nick)
File "/home/TrollOP/willie/willie/db.py", line 114, in get_nick_id
[slug]).fetchone()
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/weather.py", line 129, in weather
woeid = bot.db.get_nick_value(trigger.nick, 'woeid')
File "/home/TrollOP/willie/willie/db.py", line 161, in get_nick_value
[nick.lower(), key]
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/seen.py", line 50, in note
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
File "/home/TrollOP/willie/willie/db.py", line 152, in set_nick_value
[nick_id, key, value])
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
|
OperationalError
|
def unalias_nick(self, alias):
"""Removes an alias.
Raises ValueError if there is not at least one other nick in the group.
To delete an entire group, use `delete_group`.
"""
alias = Identifier(alias)
nick_id = self.get_nick_id(alias, False)
session = self.ssession()
try:
count = session.query(Nicknames).filter(Nicknames.nick_id == nick_id).count()
if count <= 1:
raise ValueError("Given alias is the only entry in its group.")
session.query(Nicknames).filter(Nicknames.slug == alias.lower()).delete()
session.commit()
except SQLAlchemyError:
session.rollback()
raise
finally:
session.close()
|
def unalias_nick(self, alias):
"""Removes an alias.
Raises ValueError if there is not at least one other nick in the group.
To delete an entire group, use `delete_group`.
"""
alias = Identifier(alias)
nick_id = self.get_nick_id(alias, False)
count = self.execute(
"SELECT COUNT(*) FROM nicknames WHERE nick_id = ?", [nick_id]
).fetchone()[0]
if count <= 1:
raise ValueError("Given alias is the only entry in its group.")
self.execute("DELETE FROM nicknames WHERE slug = ?", [alias.lower()])
|
https://github.com/sopel-irc/sopel/issues/736
|
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/seen.py", line 50, in note
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
File "/home/TrollOP/willie/willie/db.py", line 150, in set_nick_value
nick_id = self.get_nick_id(nick)
File "/home/TrollOP/willie/willie/db.py", line 114, in get_nick_id
[slug]).fetchone()
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/weather.py", line 129, in weather
woeid = bot.db.get_nick_value(trigger.nick, 'woeid')
File "/home/TrollOP/willie/willie/db.py", line 161, in get_nick_value
[nick.lower(), key]
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/seen.py", line 50, in note
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
File "/home/TrollOP/willie/willie/db.py", line 152, in set_nick_value
[nick_id, key, value])
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
|
OperationalError
|
def delete_nick_group(self, nick):
"""Removes a nickname, and all associated aliases and settings."""
nick = Identifier(nick)
nick_id = self.get_nick_id(nick, False)
session = self.ssession()
try:
session.query(Nicknames).filter(Nicknames.nick_id == nick_id).delete()
session.query(NickValues).filter(NickValues.nick_id == nick_id).delete()
session.commit()
except SQLAlchemyError:
session.rollback()
raise
finally:
session.close()
|
def delete_nick_group(self, nick):
"""Removes a nickname, and all associated aliases and settings."""
nick = Identifier(nick)
nick_id = self.get_nick_id(nick, False)
self.execute("DELETE FROM nicknames WHERE nick_id = ?", [nick_id])
self.execute("DELETE FROM nick_values WHERE nick_id = ?", [nick_id])
|
https://github.com/sopel-irc/sopel/issues/736
|
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/seen.py", line 50, in note
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
File "/home/TrollOP/willie/willie/db.py", line 150, in set_nick_value
nick_id = self.get_nick_id(nick)
File "/home/TrollOP/willie/willie/db.py", line 114, in get_nick_id
[slug]).fetchone()
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/weather.py", line 129, in weather
woeid = bot.db.get_nick_value(trigger.nick, 'woeid')
File "/home/TrollOP/willie/willie/db.py", line 161, in get_nick_value
[nick.lower(), key]
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/seen.py", line 50, in note
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
File "/home/TrollOP/willie/willie/db.py", line 152, in set_nick_value
[nick_id, key, value])
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
|
OperationalError
|
def merge_nick_groups(self, first_nick, second_nick):
"""Merges the nick groups for the specified nicks.
Takes two nicks, which may or may not be registered. Unregistered
nicks will be registered. Keys which are set for only one of the given
nicks will be preserved. Where multiple nicks have values for a given
key, the value set for the first nick will be used.
Note that merging of data only applies to the native key-value store.
If modules define their own tables which rely on the nick table, they
will need to have their merging done separately."""
first_id = self.get_nick_id(Identifier(first_nick))
second_id = self.get_nick_id(Identifier(second_nick))
session = self.ssession()
try:
# Get second_id's values
res = session.query(NickValues).filter(NickValues.nick_id == second_id).all()
# Update first_id with second_id values if first_id doesn't have that key
for row in res:
first_res = (
session.query(NickValues)
.filter(NickValues.nick_id == first_id)
.filter(NickValues.key == row.key)
.one_or_none()
)
if not first_res:
self.set_nick_value(first_nick, row.key, _deserialize(row.value))
session.query(NickValues).filter(NickValues.nick_id == second_id).delete()
session.query(Nicknames).filter(Nicknames.nick_id == second_id).update(
{"nick_id": first_id}
)
session.commit()
except SQLAlchemyError:
session.rollback()
raise
finally:
session.close()
|
def merge_nick_groups(self, first_nick, second_nick):
"""Merges the nick groups for the specified nicks.
Takes two nicks, which may or may not be registered. Unregistered
nicks will be registered. Keys which are set for only one of the given
nicks will be preserved. Where multiple nicks have values for a given
key, the value set for the first nick will be used.
Note that merging of data only applies to the native key-value store.
If modules define their own tables which rely on the nick table, they
will need to have their merging done separately."""
first_id = self.get_nick_id(Identifier(first_nick))
second_id = self.get_nick_id(Identifier(second_nick))
self.execute(
"UPDATE OR IGNORE nick_values SET nick_id = ? WHERE nick_id = ?",
[first_id, second_id],
)
self.execute("DELETE FROM nick_values WHERE nick_id = ?", [second_id])
self.execute(
"UPDATE nicknames SET nick_id = ? WHERE nick_id = ?", [first_id, second_id]
)
|
https://github.com/sopel-irc/sopel/issues/736
|
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/seen.py", line 50, in note
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
File "/home/TrollOP/willie/willie/db.py", line 150, in set_nick_value
nick_id = self.get_nick_id(nick)
File "/home/TrollOP/willie/willie/db.py", line 114, in get_nick_id
[slug]).fetchone()
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/weather.py", line 129, in weather
woeid = bot.db.get_nick_value(trigger.nick, 'woeid')
File "/home/TrollOP/willie/willie/db.py", line 161, in get_nick_value
[nick.lower(), key]
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/seen.py", line 50, in note
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
File "/home/TrollOP/willie/willie/db.py", line 152, in set_nick_value
[nick_id, key, value])
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
|
OperationalError
|
def set_channel_value(self, channel, key, value):
"""Sets the value for a given key to be associated with the channel."""
channel = Identifier(channel).lower()
value = json.dumps(value, ensure_ascii=False)
session = self.ssession()
try:
result = (
session.query(ChannelValues)
.filter(ChannelValues.channel == channel)
.filter(ChannelValues.key == key)
.one_or_none()
)
# ChannelValue exists, update
if result:
result.value = value
session.commit()
# DNE - Insert
else:
new_channelvalue = ChannelValues(channel=channel, key=key, value=value)
session.add(new_channelvalue)
session.commit()
except SQLAlchemyError:
session.rollback()
raise
finally:
session.close()
|
def set_channel_value(self, channel, key, value):
"""Sets the value for a given key to be associated with the channel."""
channel = Identifier(channel).lower()
value = json.dumps(value, ensure_ascii=False)
self.execute(
"INSERT OR REPLACE INTO channel_values VALUES (?, ?, ?)", [channel, key, value]
)
|
https://github.com/sopel-irc/sopel/issues/736
|
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/seen.py", line 50, in note
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
File "/home/TrollOP/willie/willie/db.py", line 150, in set_nick_value
nick_id = self.get_nick_id(nick)
File "/home/TrollOP/willie/willie/db.py", line 114, in get_nick_id
[slug]).fetchone()
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/weather.py", line 129, in weather
woeid = bot.db.get_nick_value(trigger.nick, 'woeid')
File "/home/TrollOP/willie/willie/db.py", line 161, in get_nick_value
[nick.lower(), key]
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/seen.py", line 50, in note
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
File "/home/TrollOP/willie/willie/db.py", line 152, in set_nick_value
[nick_id, key, value])
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
|
OperationalError
|
def get_channel_value(self, channel, key):
"""Retrieves the value for a given key associated with a channel."""
channel = Identifier(channel).lower()
session = self.ssession()
try:
result = (
session.query(ChannelValues)
.filter(ChannelValues.channel == channel)
.filter(ChannelValues.key == key)
.one_or_none()
)
if result is not None:
result = result.value
return _deserialize(result)
except SQLAlchemyError:
session.rollback()
raise
finally:
session.close()
|
def get_channel_value(self, channel, key):
"""Retrieves the value for a given key associated with a channel."""
channel = Identifier(channel).lower()
result = self.execute(
"SELECT value FROM channel_values WHERE channel = ? AND key = ?", [channel, key]
).fetchone()
if result is not None:
result = result[0]
return _deserialize(result)
|
https://github.com/sopel-irc/sopel/issues/736
|
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/seen.py", line 50, in note
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
File "/home/TrollOP/willie/willie/db.py", line 150, in set_nick_value
nick_id = self.get_nick_id(nick)
File "/home/TrollOP/willie/willie/db.py", line 114, in get_nick_id
[slug]).fetchone()
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/weather.py", line 129, in weather
woeid = bot.db.get_nick_value(trigger.nick, 'woeid')
File "/home/TrollOP/willie/willie/db.py", line 161, in get_nick_value
[nick.lower(), key]
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/seen.py", line 50, in note
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
File "/home/TrollOP/willie/willie/db.py", line 152, in set_nick_value
[nick_id, key, value])
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
|
OperationalError
|
def connect(self):
"""Return a raw database connection object."""
return sqlite3.connect(self.filename, timeout=10)
|
def connect(self):
"""Return a raw database connection object."""
return sqlite3.connect(self.filename)
|
https://github.com/sopel-irc/sopel/issues/736
|
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/seen.py", line 50, in note
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
File "/home/TrollOP/willie/willie/db.py", line 150, in set_nick_value
nick_id = self.get_nick_id(nick)
File "/home/TrollOP/willie/willie/db.py", line 114, in get_nick_id
[slug]).fetchone()
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/weather.py", line 129, in weather
woeid = bot.db.get_nick_value(trigger.nick, 'woeid')
File "/home/TrollOP/willie/willie/db.py", line 161, in get_nick_value
[nick.lower(), key]
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
Traceback (most recent call last):
File "/home/TrollOP/willie/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/home/TrollOP/willie/willie/modules/seen.py", line 50, in note
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
File "/home/TrollOP/willie/willie/db.py", line 152, in set_nick_value
[nick_id, key, value])
File "/home/TrollOP/willie/willie/db.py", line 64, in execute
return cur.execute(*args, **kwargs)
OperationalError: database is locked
|
OperationalError
|
def run(config, pid_file, daemon=False):
import sopel.bot as bot
import sopel.logger
from sopel.tools import stderr
delay = 20
# Inject ca_certs from config to web for SSL validation of web requests
if not config.core.ca_certs:
stderr("Could not open CA certificates file. SSL will not work properly.")
def signal_handler(sig, frame):
if sig == signal.SIGUSR1 or sig == signal.SIGTERM or sig == signal.SIGINT:
stderr("Got quit signal, shutting down.")
p.quit("Closing")
# Define empty variable `p` for bot
p = None
while True:
if (
p and p.hasquit
): # Check if `hasquit` was set for bot during disconnected phase
break
try:
p = bot.Sopel(config, daemon=daemon)
if hasattr(signal, "SIGUSR1"):
signal.signal(signal.SIGUSR1, signal_handler)
if hasattr(signal, "SIGTERM"):
signal.signal(signal.SIGTERM, signal_handler)
if hasattr(signal, "SIGINT"):
signal.signal(signal.SIGINT, signal_handler)
sopel.logger.setup_logging(p)
p.run(config.core.host, int(config.core.port))
except KeyboardInterrupt:
break
except Exception: # TODO: Be specific
trace = traceback.format_exc()
try:
stderr(trace)
except Exception: # TODO: Be specific
pass
logfile = open(os.path.join(config.core.logdir, "exceptions.log"), "a")
logfile.write("Critical exception in core")
logfile.write(trace)
logfile.write("----------------------------------------\n\n")
logfile.close()
os.unlink(pid_file)
os._exit(1)
if not isinstance(delay, int):
break
if p.hasquit:
break
stderr("Warning: Disconnected. Reconnecting in %s seconds..." % delay)
time.sleep(delay)
os.unlink(pid_file)
os._exit(0)
|
def run(config, pid_file, daemon=False):
import sopel.bot as bot
import sopel.logger
from sopel.tools import stderr
delay = 20
# Inject ca_certs from config to web for SSL validation of web requests
if not config.core.ca_certs:
stderr("Could not open CA certificates file. SSL will not work properly.")
def signal_handler(sig, frame):
if sig == signal.SIGUSR1 or sig == signal.SIGTERM or sig == signal.SIGINT:
stderr("Got quit signal, shutting down.")
p.quit("Closing")
while True:
try:
p = bot.Sopel(config, daemon=daemon)
if hasattr(signal, "SIGUSR1"):
signal.signal(signal.SIGUSR1, signal_handler)
if hasattr(signal, "SIGTERM"):
signal.signal(signal.SIGTERM, signal_handler)
if hasattr(signal, "SIGINT"):
signal.signal(signal.SIGINT, signal_handler)
sopel.logger.setup_logging(p)
p.run(config.core.host, int(config.core.port))
except KeyboardInterrupt:
break
except Exception: # TODO: Be specific
trace = traceback.format_exc()
try:
stderr(trace)
except Exception: # TODO: Be specific
pass
logfile = open(os.path.join(config.core.logdir, "exceptions.log"), "a")
logfile.write("Critical exception in core")
logfile.write(trace)
logfile.write("----------------------------------------\n\n")
logfile.close()
os.unlink(pid_file)
os._exit(1)
if not isinstance(delay, int):
break
if p.hasquit:
break
stderr("Warning: Disconnected. Reconnecting in %s seconds..." % delay)
time.sleep(delay)
os.unlink(pid_file)
os._exit(0)
|
https://github.com/sopel-irc/sopel/issues/1478
|
Connecting to irc.network.net:6667...
^CGot quit signal, shutting down.
Traceback (most recent call last):
File "/Users/dgw/github/sopel/sopel/__init__.py", line 91, in run
p.run(config.core.host, int(config.core.port))
File "/Users/dgw/github/sopel/sopel/irc.py", line 167, in run
self.initiate_connect(host, port)
File "/Users/dgw/github/sopel/sopel/irc.py", line 177, in initiate_connect
source_address=source_address))
File "/usr/local/Cellar/python/3.7.2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socket.py", line 707, in create_connection
for res in getaddrinfo(host, port, 0, SOCK_STREAM):
File "/usr/local/Cellar/python/3.7.2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socket.py", line 748, in getaddrinfo
for res in _socket.getaddrinfo(host, port, family, type, proto, flags):
File "/Users/dgw/github/sopel/sopel/__init__.py", line 73, in signal_handler
p.quit('Closing')
File "/Users/dgw/github/sopel/sopel/irc.py", line 199, in quit
self.write(['QUIT'], message)
File "/Users/dgw/github/sopel/sopel/bot.py", line 166, in write
irc.Bot.write(self, args, text=text)
File "/Users/dgw/github/sopel/sopel/irc.py", line 161, in write
self.send(temp.encode('utf-8'))
File "/usr/local/Cellar/python/3.7.2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/asyncore.py", line 360, in send
result = self.socket.send(data)
AttributeError: 'NoneType' object has no attribute 'send'
|
AttributeError
|
def run(config, pid_file, daemon=False):
delay = 20
# Inject ca_certs from config to web for SSL validation of web requests
if not config.core.ca_certs:
tools.stderr("Could not open CA certificates file. SSL will not work properly!")
def signal_handler(sig, frame):
if sig == signal.SIGUSR1 or sig == signal.SIGTERM or sig == signal.SIGINT:
tools.stderr("Got quit signal, shutting down.")
p.quit("Closing")
elif sig == signal.SIGUSR2 or sig == signal.SIGILL:
tools.stderr("Got restart signal.")
p.restart("Restarting")
# Define empty variable `p` for bot
p = None
while True:
if (
p and p.hasquit
): # Check if `hasquit` was set for bot during disconnected phase
break
try:
p = bot.Sopel(config, daemon=daemon)
if hasattr(signal, "SIGUSR1"):
signal.signal(signal.SIGUSR1, signal_handler)
if hasattr(signal, "SIGTERM"):
signal.signal(signal.SIGTERM, signal_handler)
if hasattr(signal, "SIGINT"):
signal.signal(signal.SIGINT, signal_handler)
if hasattr(signal, "SIGUSR2"):
signal.signal(signal.SIGUSR2, signal_handler)
if hasattr(signal, "SIGILL"):
signal.signal(signal.SIGILL, signal_handler)
logger.setup_logging(p)
p.run(config.core.host, int(config.core.port))
except KeyboardInterrupt:
break
except Exception: # TODO: Be specific
trace = traceback.format_exc()
try:
tools.stderr(trace)
except Exception: # TODO: Be specific
pass
logfile = open(os.path.join(config.core.logdir, "exceptions.log"), "a")
logfile.write("Critical exception in core")
logfile.write(trace)
logfile.write("----------------------------------------\n\n")
logfile.close()
# TODO: This should be handled by command_start
# All we should need here is a return value, but replacing the
# os._exit() call below (at the end) broke ^C.
# This one is much harder to test, so until that one's sorted it
# isn't worth the risk of trying to remove this one.
os.unlink(pid_file)
os._exit(1)
if not isinstance(delay, int):
break
if p.wantsrestart:
return -1
if p.hasquit:
break
tools.stderr("Warning: Disconnected. Reconnecting in %s seconds..." % delay)
time.sleep(delay)
# TODO: This should be handled by command_start
# All we should need here is a return value, but making this
# a return makes Sopel hang on ^C after it says "Closed!"
os.unlink(pid_file)
os._exit(0)
|
def run(config, pid_file, daemon=False):
delay = 20
# Inject ca_certs from config to web for SSL validation of web requests
if not config.core.ca_certs:
tools.stderr("Could not open CA certificates file. SSL will not work properly!")
def signal_handler(sig, frame):
if sig == signal.SIGUSR1 or sig == signal.SIGTERM or sig == signal.SIGINT:
tools.stderr("Got quit signal, shutting down.")
p.quit("Closing")
elif sig == signal.SIGUSR2 or sig == signal.SIGILL:
tools.stderr("Got restart signal.")
p.restart("Restarting")
while True:
try:
p = bot.Sopel(config, daemon=daemon)
if hasattr(signal, "SIGUSR1"):
signal.signal(signal.SIGUSR1, signal_handler)
if hasattr(signal, "SIGTERM"):
signal.signal(signal.SIGTERM, signal_handler)
if hasattr(signal, "SIGINT"):
signal.signal(signal.SIGINT, signal_handler)
if hasattr(signal, "SIGUSR2"):
signal.signal(signal.SIGUSR2, signal_handler)
if hasattr(signal, "SIGILL"):
signal.signal(signal.SIGILL, signal_handler)
logger.setup_logging(p)
p.run(config.core.host, int(config.core.port))
except KeyboardInterrupt:
break
except Exception: # TODO: Be specific
trace = traceback.format_exc()
try:
tools.stderr(trace)
except Exception: # TODO: Be specific
pass
logfile = open(os.path.join(config.core.logdir, "exceptions.log"), "a")
logfile.write("Critical exception in core")
logfile.write(trace)
logfile.write("----------------------------------------\n\n")
logfile.close()
# TODO: This should be handled by command_start
# All we should need here is a return value, but replacing the
# os._exit() call below (at the end) broke ^C.
# This one is much harder to test, so until that one's sorted it
# isn't worth the risk of trying to remove this one.
os.unlink(pid_file)
os._exit(1)
if not isinstance(delay, int):
break
if p.wantsrestart:
return -1
if p.hasquit:
break
tools.stderr("Warning: Disconnected. Reconnecting in %s seconds..." % delay)
time.sleep(delay)
# TODO: This should be handled by command_start
# All we should need here is a return value, but making this
# a return makes Sopel hang on ^C after it says "Closed!"
os.unlink(pid_file)
os._exit(0)
|
https://github.com/sopel-irc/sopel/issues/1478
|
Connecting to irc.network.net:6667...
^CGot quit signal, shutting down.
Traceback (most recent call last):
File "/Users/dgw/github/sopel/sopel/__init__.py", line 91, in run
p.run(config.core.host, int(config.core.port))
File "/Users/dgw/github/sopel/sopel/irc.py", line 167, in run
self.initiate_connect(host, port)
File "/Users/dgw/github/sopel/sopel/irc.py", line 177, in initiate_connect
source_address=source_address))
File "/usr/local/Cellar/python/3.7.2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socket.py", line 707, in create_connection
for res in getaddrinfo(host, port, 0, SOCK_STREAM):
File "/usr/local/Cellar/python/3.7.2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socket.py", line 748, in getaddrinfo
for res in _socket.getaddrinfo(host, port, family, type, proto, flags):
File "/Users/dgw/github/sopel/sopel/__init__.py", line 73, in signal_handler
p.quit('Closing')
File "/Users/dgw/github/sopel/sopel/irc.py", line 199, in quit
self.write(['QUIT'], message)
File "/Users/dgw/github/sopel/sopel/bot.py", line 166, in write
irc.Bot.write(self, args, text=text)
File "/Users/dgw/github/sopel/sopel/irc.py", line 161, in write
self.send(temp.encode('utf-8'))
File "/usr/local/Cellar/python/3.7.2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/asyncore.py", line 360, in send
result = self.socket.send(data)
AttributeError: 'NoneType' object has no attribute 'send'
|
AttributeError
|
def show_bug(bot, trigger, match=None):
"""Show information about a Bugzilla bug."""
match = match or trigger
domain = match.group(1)
if domain not in bot.config.bugzilla.domains:
return
url = "https://%s%sctype=xml&%s" % match.groups()
data = web.get(url, dont_decode=True)
bug = xmltodict.parse(data).get("bugzilla").get("bug")
error = bug.get("@error", None) # error="NotPermitted"
if error:
LOGGER.warning("Bugzilla error: %s" % error)
bot.say("[BUGZILLA] Unable to get infomation for linked bug (%s)" % error)
return
message = (
"[BUGZILLA] %s | Product: %s | Component: %s | Version: %s | "
+ "Importance: %s | Status: %s | Assigned to: %s | "
+ "Reported: %s | Modified: %s"
)
resolution = bug.get("resolution")
if resolution is not None:
status = bug.get("bug_status") + " " + resolution
else:
status = bug.get("bug_status")
assigned_to = bug.get("assigned_to")
if isinstance(assigned_to, dict):
assigned_to = assigned_to.get("@name")
message = message % (
bug.get("short_desc"),
bug.get("product"),
bug.get("component"),
bug.get("version"),
(bug.get("priority") + " " + bug.get("bug_severity")),
status,
assigned_to,
bug.get("creation_ts"),
bug.get("delta_ts"),
)
bot.say(message)
|
def show_bug(bot, trigger, match=None):
"""Show information about a Bugzilla bug."""
match = match or trigger
domain = match.group(1)
if domain not in bot.config.bugzilla.domains:
return
url = "https://%s%sctype=xml&%s" % match.groups()
data = web.get(url, dont_decode=True)
bug = xmltodict.parse(data).get("bugzilla").get("bug")
error = bug.get("@error", None) # error="NotPermitted"
if error:
LOGGER.warning("Bugzilla error: %s", error)
return
message = (
"[BUGZILLA] %s | Product: %s | Component: %s | Version: %s | "
+ "Importance: %s | Status: %s | Assigned to: %s | "
+ "Reported: %s | Modified: %s"
)
resolution = bug.get("resolution")
if resolution is not None:
status = bug.get("bug_status") + " " + resolution
else:
status = bug.get("bug_status")
assigned_to = bug.get("assigned_to")
if isinstance(assigned_to, dict):
assigned_to = assigned_to.get("@name")
message = message % (
bug.get("short_desc"),
bug.get("product"),
bug.get("component"),
bug.get("version"),
(bug.get("priority") + " " + bug.get("bug_severity")),
status,
assigned_to,
bug.get("creation_ts"),
bug.get("delta_ts"),
)
bot.say(message)
|
https://github.com/sopel-irc/sopel/issues/1112
|
Signature: TypeError: unsupported operand type(s) for +: 'NoneType' and 'unicode' (file "/usr/lib/python2.7/site-packages/sopel/modules/bugzilla.py", line 80, in show_bug)
from psachin at 2016-07-28 15:07:21.534591. Message was: https://bugzilla.redhat.com/show_bug.cgi?id=1334618#c15
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/sopel/bot.py", line 441, in call
exit_code = func(sopel, trigger)
File "/usr/lib/python2.7/site-packages/sopel/modules/bugzilla.py", line 80, in show_bug
(bug.get('priority') + ' ' + bug.get('bug_severity')),
TypeError: unsupported operand type(s) for +: 'NoneType' and 'unicode'
|
TypeError
|
def f_reload(bot, trigger):
"""Reloads a module, for use by admins only."""
if not trigger.admin:
return
name = trigger.group(2)
if name == bot.config.core.owner:
return bot.reply("What?")
if not name or name == "*" or name.upper() == "ALL THE THINGS":
bot._callables = {
"high": collections.defaultdict(list),
"medium": collections.defaultdict(list),
"low": collections.defaultdict(list),
}
bot._command_groups = collections.defaultdict(list)
bot.setup()
return bot.reply("done")
if name not in sys.modules:
return bot.reply("%s: not loaded, try the `load` command" % name)
old_module = sys.modules[name]
old_callables = {}
for obj_name, obj in iteritems(vars(old_module)):
bot.unregister(obj)
# Also remove all references to sopel callables from top level of the
# module, so that they will not get loaded again if reloading the
# module does not override them.
for obj_name in old_callables.keys():
delattr(old_module, obj_name)
# Also delete the setup function
if hasattr(old_module, "setup"):
delattr(old_module, "setup")
modules = sopel.loader.enumerate_modules(bot.config)
path, type_ = modules[name]
load_module(bot, name, path, type_)
|
def f_reload(bot, trigger):
"""Reloads a module, for use by admins only."""
if not trigger.admin:
return
name = trigger.group(2)
if name == bot.config.core.owner:
return bot.reply("What?")
if not name or name == "*" or name.upper() == "ALL THE THINGS":
bot._callables = {
"high": collections.defaultdict(list),
"medium": collections.defaultdict(list),
"low": collections.defaultdict(list),
}
bot.command_groups = collections.defaultdict(list)
bot.setup()
return bot.reply("done")
if name not in sys.modules:
return bot.reply("%s: not loaded, try the `load` command" % name)
old_module = sys.modules[name]
old_callables = {}
for obj_name, obj in iteritems(vars(old_module)):
bot.unregister(obj)
# Also remove all references to sopel callables from top level of the
# module, so that they will not get loaded again if reloading the
# module does not override them.
for obj_name in old_callables.keys():
delattr(old_module, obj_name)
# Also delete the setup function
if hasattr(old_module, "setup"):
delattr(old_module, "setup")
modules = sopel.loader.enumerate_modules(bot.config)
path, type_ = modules[name]
load_module(bot, name, path, type_)
|
https://github.com/sopel-irc/sopel/issues/1048
|
Traceback (most recent call last):
File "/home/caleb/sopel/sopel/bot.py", line 442, in call
exit_code = func(sopel, trigger)
File "/home/caleb/sopel/sopel/modules/reload.py", line 125, in pm_f_reload
f_reload(bot, trigger)
File "/home/caleb/sopel/sopel/modules/reload.py", line 38, in f_reload
bot.command_groups = collections.defaultdict(list)
File "/home/caleb/sopel/sopel/bot.py", line 398, in __setattr__
return setattr(self._bot, attr, value)
AttributeError: can't set attribute
|
AttributeError
|
def unregister(self, obj):
if not callable(obj):
return
if hasattr(obj, "rule"): # commands and intents have it added
for rule in obj.rule:
callb_list = self._callables[obj.priority][rule]
if obj in callb_list:
callb_list.remove(obj)
if hasattr(obj, "interval"):
# TODO this should somehow find the right job to remove, rather than
# clearing the entire queue. Issue #831
self.scheduler.clear_jobs()
if getattr(obj, "__name__", None) == "shutdown" and obj in self.shutdown_methods:
self.shutdown_methods.remove(obj)
|
def unregister(self, obj):
if hasattr(obj, "rule"): # commands and intents have it added
for rule in obj.rule:
self._callables[obj.priority][rule].remove(obj)
if hasattr(obj, "interval"):
# TODO this should somehow find the right job to remove, rather than
# clearing the entire queue. Issue #831
self.scheduler.clear_jobs()
if getattr(obj, "__name__", None) == "shutdown":
self.shutdown_methods.remove(obj)
|
https://github.com/sopel-irc/sopel/issues/899
|
Traceback (most recent call last):
File "/home/embo/prog/env/local/lib/python2.7/site-packages/sopel/bot.py", line 257, in call
exit_code = func(sopel, trigger)
File "/home/embo/prog/env/local/lib/python2.7/site-packages/sopel/modules/reload.py", line 49, in f_reload
bot.unregister(obj)
File "/home/embo/prog/env/local/lib/python2.7/site-packages/sopel/bot.py", line 169, in unregister
for rule in obj.rule:
TypeError: 'function' object is not iterable
|
TypeError
|
def compile_rule(nick, pattern):
# Not sure why this happens on reloads, but it shouldn't cause problems…
if isinstance(pattern, _regex_type):
return pattern
pattern = pattern.replace("$nickname", nick)
pattern = pattern.replace("$nick", r"{}[,:]\s+".format(nick))
flags = re.IGNORECASE
if "\n" in pattern:
flags |= re.VERBOSE
return re.compile(pattern, flags)
|
def compile_rule(nick, pattern):
pattern = pattern.replace("$nickname", nick)
pattern = pattern.replace("$nick", r"{}[,:]\s+".format(nick))
flags = re.IGNORECASE
if "\n" in pattern:
flags |= re.VERBOSE
return re.compile(pattern, flags)
|
https://github.com/sopel-irc/sopel/issues/899
|
Traceback (most recent call last):
File "/home/embo/prog/env/local/lib/python2.7/site-packages/sopel/bot.py", line 257, in call
exit_code = func(sopel, trigger)
File "/home/embo/prog/env/local/lib/python2.7/site-packages/sopel/modules/reload.py", line 49, in f_reload
bot.unregister(obj)
File "/home/embo/prog/env/local/lib/python2.7/site-packages/sopel/bot.py", line 169, in unregister
for rule in obj.rule:
TypeError: 'function' object is not iterable
|
TypeError
|
def setup(bot):
bot.config.define_section("admin", AdminSection)
|
def setup(bot):
return
bot.config.define_section("admin", AdminSection)
|
https://github.com/sopel-irc/sopel/issues/882
|
Traceback (most recent call last):
File "/home/embo/prog/willie/sopel/bot.py", line 257, in call
exit_code = func(sopel, trigger)
File "/home/embo/prog/willie/sopel/modules/admin.py", line 149, in hold_ground
if bot.config.admin.hold_ground:
File "/home/embo/prog/willie/sopel/config/__init__.py", line 168, in __getattr__
% (type(self).__name__, name))
AttributeError: 'Config' object has no attribute 'admin'
|
AttributeError
|
def configure_setting(self, name, prompt, default=NO_DEFAULT):
"""Return a validated value for this attribute from the terminal.
``prompt`` will be the docstring of the attribute if not given.
If ``default`` is passed, it will be used if no value is given by the
user. If it is not passed, the current value of the setting, or the
default value if it's unset, will be used. Note that if ``default`` is
passed, the current value of the setting will be ignored, even if it is
not the attribute's default.
"""
clazz = getattr(self.__class__, name)
if default is NO_DEFAULT:
try:
default = getattr(self, name)
except AttributeError:
pass
except ValueError:
print("The configured value for this option was invalid.")
if clazz.default is not NO_DEFAULT:
default = clazz.default
while True:
try:
value = clazz.configure(prompt, default)
except ValueError as exc:
print(exc)
else:
break
setattr(self, name, value)
|
def configure_setting(self, name, prompt=None, default=NO_DEFAULT):
"""Return a validated value for this attribute from the terminal.
``prompt`` will be the docstring of the attribute if not given.
If ``default`` is passed, it will be used if no value is given by the
user. If it is not passed, the current value of the setting, or the
default value if it's unset, will be used. Note that if ``default`` is
passed, the current value of the setting will be ignored, even if it is
not the attribute's default.
"""
clazz = getattr(self.__class__, name)
prompt = prompt or clazz.__doc__
if default is NO_DEFAULT:
try:
default = getattr(self, name)
except AttributeError:
pass
except ValueError:
print("The configured value for this option was invalid.")
if clazz.default is not NO_DEFAULT:
default = clazz.default
while True:
try:
value = clazz.configure(prompt, default)
except ValueError as exc:
print(exc)
else:
break
setattr(self, name, value)
|
https://github.com/sopel-irc/sopel/issues/864
|
Configure safety module (y/n)? [n]y
Encountered an error while writing the config file. This shouldn't happen. Check permissions.
Traceback (most recent call last):
File "/usr/local/bin/sopel", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python3.4/dist-packages/sopel/run_script.py", line 133, in main
_create_config(configpath)
File "/usr/local/lib/python3.4/dist-packages/sopel/config/__init__.py", line 254, in _create_config
config._modules()
File "/usr/local/lib/python3.4/dist-packages/sopel/config/__init__.py", line 207, in _modules
module.configure(self)
File "/usr/local/lib/python3.4/dist-packages/sopel/modules/safety.py", line 49, in configure
config.safety.configure_setting('enabled_by_default')
File "/usr/local/lib/python3.4/dist-packages/sopel/config/types.py", line 96, in configure_setting
value = clazz.configure(prompt, default)
File "/usr/local/lib/python3.4/dist-packages/sopel/config/types.py", line 205, in configure
prompt += ' (y/n)'
TypeError: unsupported operand type(s) for +=: 'NoneType' and 'str'
|
TypeError
|
def configure(config):
config.define_section("meetbot", MeetbotSection)
config.meetbot.configure_setting(
"meeting_log_path", "Enter the directory to store logs in."
)
config.meetbot.configure_setting(
"meeting_log_baseurl",
"Enter the base URL for the meeting logs.",
)
|
def configure(config):
config.define_section("meetbot", MeetbotSection)
config.meetbot.configure_setting("meeting_log_path")
config.meetbot.configure_setting("meeting_log_baseurl")
|
https://github.com/sopel-irc/sopel/issues/864
|
Configure safety module (y/n)? [n]y
Encountered an error while writing the config file. This shouldn't happen. Check permissions.
Traceback (most recent call last):
File "/usr/local/bin/sopel", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python3.4/dist-packages/sopel/run_script.py", line 133, in main
_create_config(configpath)
File "/usr/local/lib/python3.4/dist-packages/sopel/config/__init__.py", line 254, in _create_config
config._modules()
File "/usr/local/lib/python3.4/dist-packages/sopel/config/__init__.py", line 207, in _modules
module.configure(self)
File "/usr/local/lib/python3.4/dist-packages/sopel/modules/safety.py", line 49, in configure
config.safety.configure_setting('enabled_by_default')
File "/usr/local/lib/python3.4/dist-packages/sopel/config/types.py", line 96, in configure_setting
value = clazz.configure(prompt, default)
File "/usr/local/lib/python3.4/dist-packages/sopel/config/types.py", line 205, in configure
prompt += ' (y/n)'
TypeError: unsupported operand type(s) for +=: 'NoneType' and 'str'
|
TypeError
|
def configure(config):
config.define_section("safety", SafetySection)
config.safety.configure_setting(
"enabled_by_default",
"Enable URL safety in channels that don't specifically disable it?",
)
config.safety.configure_setting(
"known_good",
"Enter any domains to whitelist",
)
config.safety.configure_setting(
"vt_api_key",
"Optionaly, enter a VirusTotal API key to improve malicious URL "
"protection. Otherwise, only the Malwarebytes DB will be used.",
)
|
def configure(config):
config.define_section("safety", SafetySection)
config.safety.configure_setting("enabled_by_default")
config.safety.configure_setting("known_good")
config.safety.configure_setting(
"vt_api_key",
"Optionaly, enter a VirusTotal API key to improve malicious URL "
"protection. Otherwise, only the Malwarebytes DB will be used.",
)
|
https://github.com/sopel-irc/sopel/issues/864
|
Configure safety module (y/n)? [n]y
Encountered an error while writing the config file. This shouldn't happen. Check permissions.
Traceback (most recent call last):
File "/usr/local/bin/sopel", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python3.4/dist-packages/sopel/run_script.py", line 133, in main
_create_config(configpath)
File "/usr/local/lib/python3.4/dist-packages/sopel/config/__init__.py", line 254, in _create_config
config._modules()
File "/usr/local/lib/python3.4/dist-packages/sopel/config/__init__.py", line 207, in _modules
module.configure(self)
File "/usr/local/lib/python3.4/dist-packages/sopel/modules/safety.py", line 49, in configure
config.safety.configure_setting('enabled_by_default')
File "/usr/local/lib/python3.4/dist-packages/sopel/config/types.py", line 96, in configure_setting
value = clazz.configure(prompt, default)
File "/usr/local/lib/python3.4/dist-packages/sopel/config/types.py", line 205, in configure
prompt += ' (y/n)'
TypeError: unsupported operand type(s) for +=: 'NoneType' and 'str'
|
TypeError
|
def mw_info(bot, trigger, found_match=None):
"""
Retrives a snippet of the specified length from the given page on the given
server.
"""
match = found_match or trigger
say_snippet(bot, match.group(1), unquote(match.group(2)), show_url=False)
|
def mw_info(bot, trigger, found_match=None):
"""
Retrives a snippet of the specified length from the given page on the given
server.
"""
match = found_match or trigger
say_snippet(bot, match.group(1), match.group(2), show_url=False)
|
https://github.com/sopel-irc/sopel/issues/724
|
Signature: KeyError: u'extract' (file "/usr/local/lib/python2.7/dist-packages/willie/modules/wikipedia.py", line 83, in mw_snippet)
from Tomi at 2015-01-28 18:29:59.907542. Message was: http://en.wikipedia.org/wiki/Troll_%28Internet%29
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/willie/bot.py", line 634, in call
exit_code = func(willie, trigger)
File "/usr/local/lib/python2.7/dist-packages/willie/modules/wikipedia.py", line 93, in mw_info
say_snippet(bot, match.group(1), match.group(2), show_url=False)
File "/usr/local/lib/python2.7/dist-packages/willie/modules/wikipedia.py", line 60, in say_snippet
snippet = mw_snippet(server, query)
File "/usr/local/lib/python2.7/dist-packages/willie/modules/wikipedia.py", line 83, in mw_snippet
return snippet['extract']
KeyError: u'extract'
|
KeyError
|
def get_fpath(bot, trigger, channel=None):
"""
Returns a string corresponding to the path to the file where the message
currently being handled should be logged.
"""
basedir = os.path.expanduser(bot.config.chanlogs.dir)
channel = channel or trigger.sender
channel = channel.lstrip("#")
channel = BAD_CHARS.sub("__")
dt = datetime.utcnow()
if not bot.config.chanlogs.microseconds:
dt = dt.replace(microsecond=0)
if bot.config.chanlogs.by_day:
fname = "{channel}-{date}.log".format(
channel=channel, date=dt.date().isoformat()
)
else:
fname = "{channel}.log".format(channel=channel)
return os.path.join(basedir, fname)
|
def get_fpath(bot, trigger, channel=None):
"""
Returns a string corresponding to the path to the file where the message
currently being handled should be logged.
"""
basedir = os.path.expanduser(bot.config.chanlogs.dir)
channel = channel or trigger.sender
channel = channel.lstrip("#")
dt = datetime.utcnow()
if not bot.config.chanlogs.microseconds:
dt = dt.replace(microsecond=0)
if bot.config.chanlogs.by_day:
fname = "{channel}-{date}.log".format(
channel=channel, date=dt.date().isoformat()
)
else:
fname = "{channel}.log".format(channel=channel)
return os.path.join(basedir, fname)
|
https://github.com/sopel-irc/sopel/issues/490
|
Traceback (most recent call last):
File "/home/amphy/willie/willie/bot.py", line 747, in call
exit_code = func(willie, trigger)
File "/home/amphy/willie/willie/modules/chanlogs.py", line 115, in log_join
with open(fpath, "a") as f:
IOError: [Errno 2] No such file or directory: u'/vp/-2014-03-21.log'
|
IOError
|
def auth_proceed(bot, trigger):
if trigger.args[0] != "+":
# How did we get here? I am not good with computer.
return
# Is this right?
if bot.config.core.sasl_username:
sasl_username = bot.config.core.sasl_username
else:
sasl_username = bot.nick
sasl_token = "\0".join(
(sasl_username, sasl_username, bot.config.core.sasl_password)
)
# Spec says we do a base 64 encode on the SASL stuff
bot.write(("AUTHENTICATE", base64.b64encode(sasl_token.encode("utf-8"))))
|
def auth_proceed(bot, trigger):
if trigger.args[0] != "+":
# How did we get here? I am not good with computer.
return
# Is this right?
if bot.config.core.sasl_username:
sasl_username = bot.config.core.sasl_username
else:
sasl_username = bot.nick
sasl_token = "\0".join(
(sasl_username, sasl_username, bot.config.core.sasl_password)
)
# Spec says we do a base 64 encode on the SASL stuff
bot.write(("AUTHENTICATE", base64.b64encode(sasl_token)))
|
https://github.com/sopel-irc/sopel/issues/707
|
Traceback (most recent call last):
File "/opt/rh/python33/root/usr/lib/python3.3/site-packages/willie/bot.py", line 743, in call
exit_code = func(willie, trigger)
File "/opt/rh/python33/root/usr/lib/python3.3/site-packages/willie/coretasks.py", line 432, in auth_proceed
bot.write(('AUTHENTICATE', base64.b64encode(sasl_token)))
File "/opt/rh/python33/root/usr/lib64/python3.3/base64.py", line 58, in b64encode
raise TypeError("expected bytes, not %s" % s.__class__.__name__)
TypeError: expected bytes, not str
|
TypeError
|
def read_feeds(bot, force=False):
if not bot.memory["rss_manager"].running and not force:
return
sub = bot.db.substitution
conn = bot.db.connect()
c = conn.cursor()
c.execute("SELECT * FROM rss_feeds")
feeds = c.fetchall()
if not feeds:
bot.debug(__file__, "No RSS feeds to check.", "warning")
return
for feed_row in feeds:
feed = RSSFeed(feed_row)
if not feed.enabled:
continue
def disable_feed():
c.execute(
"""
UPDATE rss_feeds SET enabled = {0}
WHERE channel = {0} AND feed_name = {0}
""".format(sub),
(0, feed.channel, feed.name),
)
conn.commit()
try:
fp = feedparser.parse(feed.url, etag=feed.etag, modified=feed.modified)
except IOError as e:
bot.debug(
__file__,
"Can't parse feed on {0}, disabling ({1})".format(feed.name, str(e)),
"warning",
)
disable_feed()
continue
# fp.status will only exist if pulling from an online feed
status = getattr(fp, "status", None)
bot.debug(
feed.channel,
"{0}: status = {1}, version = '{2}', items = {3}".format(
feed.name, status, fp.version, len(fp.entries)
),
"verbose",
)
# check HTTP status
if status == 301: # MOVED_PERMANENTLY
bot.debug(
__file__,
"Got HTTP 301 (Moved Permanently) on {0}, updating URI to {1}".format(
feed.name, fp.href
),
"warning",
)
c.execute(
"""
UPDATE rss_feeds SET feed_url = {0}
WHERE channel = {0} AND feed_name = {0}
""".format(sub),
(fp.href, feed.channel, feed.name),
)
conn.commit()
elif status == 410: # GONE
bot.debug(
__file__,
"Got HTTP 410 (Gone) on {0}, disabling".format(feed.name),
"warning",
)
disable_feed()
if not fp.entries:
continue
feed_etag = getattr(fp, "etag", None)
feed_modified = getattr(fp, "modified", None)
entry = fp.entries[0]
# parse published and updated times into datetime objects (or None)
entry_dt = (
datetime.fromtimestamp(time.mktime(entry.published_parsed))
if hasattr(entry, "published_parsed")
else None
)
entry_update_dt = (
datetime.fromtimestamp(time.mktime(entry.updated_parsed))
if hasattr(entry, "updated_parsed")
else None
)
# check if article is new, and skip otherwise
if (
feed.title == entry.title
and feed.link == entry.link
and feed.etag == feed_etag
and feed.modified == feed_modified
):
bot.debug(
__file__,
"Skipping previously read entry: [{0}] {1}".format(
feed.name, entry.title
),
"verbose",
)
continue
# save article title, url, and modified date
c.execute(
"""
UPDATE rss_feeds
SET article_title = {0}, article_url = {0}, published = {0}, etag = {0}, modified = {0}
WHERE channel = {0} AND feed_name = {0}
""".format(sub),
(
entry.title,
entry.link,
entry_dt,
feed_etag,
feed_modified,
feed.channel,
feed.name,
),
)
conn.commit()
if feed.published and entry_dt:
published_dt = datetime.strptime(feed.published, "%Y-%m-%d %H:%M:%S")
if published_dt >= entry_dt:
# This will make more sense once iterating over the feed is
# implemented. Once that happens, deleting or modifying the
# latest item would result in the whole feed getting re-msg'd.
# This will prevent that from happening.
bot.debug(
__file__,
"Skipping older entry: [{0}] {1}, because {2} >= {3}".format(
feed.name, entry.title, published_dt, entry_dt
),
"verbose",
)
continue
# create message for new entry
message = "[\x02{0}\x02] \x02{1}\x02 {2}".format(
colour_text(feed.name, feed.fg, feed.bg), entry.title, entry.link
)
# append update time if it exists, or published time if it doesn't
timestamp = entry_update_dt or entry_dt
if timestamp:
# attempt to get time format from preferences
tformat = ""
if feed.channel in bot.db.preferences:
tformat = bot.db.preferences.get(feed.channel, "time_format") or tformat
if not tformat and bot.config.has_option("clock", "time_format"):
tformat = bot.config.clock.time_format
message += " - {0}".format(timestamp.strftime(tformat or "%F - %T%Z"))
# print message
bot.msg(feed.channel, message)
conn.close()
|
def read_feeds(bot, force=False):
if not bot.memory["rss_manager"].running and not force:
return
sub = bot.db.substitution
conn = bot.db.connect()
c = conn.cursor()
c.execute("SELECT * FROM rss_feeds")
feeds = c.fetchall()
if not feeds:
bot.debug(__file__, "No RSS feeds to check.", "warning")
return
for feed_row in feeds:
feed = RSSFeed(feed_row)
if not feed.enabled:
continue
def disable_feed():
c.execute(
"""
UPDATE rss_feeds SET enabled = {0}
WHERE channel = {0} AND feed_name = {0}
""".format(sub),
(0, feed.channel, feed.name),
)
conn.commit()
try:
fp = feedparser.parse(feed.url, etag=feed.etag, modified=feed.modified)
except IOError as e:
bot.debug(
__file__,
"Can't parse feed on {0}, disabling ({1})".format(feed.name, str(e)),
"warning",
)
disable_feed()
continue
# fp.status will only exist if pulling from an online feed
status = getattr(fp, "status", None)
bot.debug(
feed.channel,
"{0}: status = {1}, version = '{2}', items = {3}".format(
feed.name, status, fp.version, len(fp.entries)
),
"verbose",
)
# check for malformed XML
if fp.bozo:
bot.debug(
__file__,
"Got malformed feed on {0}, disabling ({1})".format(
feed.name, fp.bozo_exception.getMessage()
),
"warning",
)
disable_feed()
continue
# check HTTP status
if status == 301: # MOVED_PERMANENTLY
bot.debug(
__file__,
"Got HTTP 301 (Moved Permanently) on {0}, updating URI to {1}".format(
feed.name, fp.href
),
"warning",
)
c.execute(
"""
UPDATE rss_feeds SET feed_url = {0}
WHERE channel = {0} AND feed_name = {0}
""".format(sub),
(fp.href, feed.channel, feed.name),
)
conn.commit()
elif status == 410: # GONE
bot.debug(
__file__,
"Got HTTP 410 (Gone) on {0}, disabling".format(feed.name),
"warning",
)
disable_feed()
if not fp.entries:
continue
feed_etag = getattr(fp, "etag", None)
feed_modified = getattr(fp, "modified", None)
entry = fp.entries[0]
# parse published and updated times into datetime objects (or None)
entry_dt = (
datetime.fromtimestamp(time.mktime(entry.published_parsed))
if hasattr(entry, "published_parsed")
else None
)
entry_update_dt = (
datetime.fromtimestamp(time.mktime(entry.updated_parsed))
if hasattr(entry, "updated_parsed")
else None
)
# check if article is new, and skip otherwise
if (
feed.title == entry.title
and feed.link == entry.link
and feed.etag == feed_etag
and feed.modified == feed_modified
):
bot.debug(
__file__,
"Skipping previously read entry: [{0}] {1}".format(
feed.name, entry.title
),
"verbose",
)
continue
# save article title, url, and modified date
c.execute(
"""
UPDATE rss_feeds
SET article_title = {0}, article_url = {0}, published = {0}, etag = {0}, modified = {0}
WHERE channel = {0} AND feed_name = {0}
""".format(sub),
(
entry.title,
entry.link,
entry_dt,
feed_etag,
feed_modified,
feed.channel,
feed.name,
),
)
conn.commit()
if feed.published and entry_dt:
published_dt = datetime.strptime(feed.published, "%Y-%m-%d %H:%M:%S")
if published_dt >= entry_dt:
# This will make more sense once iterating over the feed is
# implemented. Once that happens, deleting or modifying the
# latest item would result in the whole feed getting re-msg'd.
# This will prevent that from happening.
bot.debug(
__file__,
"Skipping older entry: [{0}] {1}, because {2} >= {3}".format(
feed.name, entry.title, published_dt, entry_dt
),
"verbose",
)
continue
# create message for new entry
message = "[\x02{0}\x02] \x02{1}\x02 {2}".format(
colour_text(feed.name, feed.fg, feed.bg), entry.title, entry.link
)
# append update time if it exists, or published time if it doesn't
timestamp = entry_update_dt or entry_dt
if timestamp:
# attempt to get time format from preferences
tformat = ""
if feed.channel in bot.db.preferences:
tformat = bot.db.preferences.get(feed.channel, "time_format") or tformat
if not tformat and bot.config.has_option("clock", "time_format"):
tformat = bot.config.clock.time_format
message += " - {0}".format(timestamp.strftime(tformat or "%F - %T%Z"))
# print message
bot.msg(feed.channel, message)
conn.close()
|
https://github.com/sopel-irc/sopel/issues/449
|
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/willie/bot.py", line 719, in call
exit_code = func(willie, trigger)
File "/home/%USERNAME%/.willie/modules/rss.py", line 129, in manage_rss
bot.memory['rss_manager'].manage_rss(bot, trigger)
File "/home/%USERNAME%/.willie/modules/rss.py", line 161, in manage_rss
if getattr(self, '_rss_' + text[1])(bot, trigger, conn.cursor()):
File "/home/%USERNAME%/.willie/modules/rss.py", line 337, in _rss_fetch
read_feeds(bot, True)
File "/home/%USERNAME%/.willie/modules/rss.py", line 413, in read_feeds
feed.name, fp.bozo_exception.getMessage()), 'warning')
AttributeError: 'CharacterEncodingOverride' object has no attribute 'getMessage'
|
AttributeError
|
def track_part(bot, trigger):
if trigger.nick == bot.nick:
bot.channels.remove(trigger.sender)
del bot.privileges[trigger.sender]
else:
try:
del bot.privileges[trigger.sender][trigger.nick]
except KeyError:
pass
|
def track_part(bot, trigger):
if trigger.nick == bot.nick:
bot.channels.remove(trigger.sender)
del bot.privileges[trigger.sender]
else:
del bot.privileges[trigger.sender][trigger.nick]
|
https://github.com/sopel-irc/sopel/issues/365
|
Signature: KeyError: u'ict' (file "/home/ict/build/willie/willie/modules/adminchannel.py", line 28, in op)
from #mytestchannel at 2013-11-11 23:27:34.803790:
Message was: <ict> .op
Traceback (most recent call last):
File "/home/ict/build/willie/willie/bot.py", line 697, in call
exit_code = func(willie, trigger)
File "/home/ict/build/willie/willie/modules/adminchannel.py", line 28, in op
if (bot.privileges[trigger.sender][trigger.nick] >= OP) or trigger.admin or trigger.owner:
KeyError: u'ict'
|
KeyError
|
def show_bug(willie, trigger):
"""Show information about a Bugzilla bug."""
domain = trigger.group(1)
if domain not in willie.config.bugzilla.get_list("domains"):
return
url = "https://%s%sctype=xml&%s" % trigger.groups()
data = web.get(url)
bug = etree.fromstring(data).find("bug")
message = (
"[BUGZILLA] %s | Product: %s | Component: %s | Version: %s | "
+ "Importance: %s | Status: %s | Assigned to: %s | "
+ "Reported: %s | Modified: %s"
)
resolution = bug.find("resolution")
if resolution is not None and resolution.text:
status = bug.find("bug_status").text + " " + resolution.text
else:
status = bug.find("bug_status").text
message = message % (
bug.find("short_desc").text,
bug.find("product").text,
bug.find("component").text,
bug.find("version").text,
(bug.find("priority").text + " " + bug.find("bug_severity").text),
status,
bug.find("assigned_to").text,
bug.find("creation_ts").text,
bug.find("delta_ts").text,
)
willie.say(message)
|
def show_bug(willie, trigger):
"""Show information about a Bugzilla bug."""
domain = trigger.group(1)
if domain not in willie.config.bugzilla.get_list("domains"):
return
url = "https://%s%sctype=xml&%s" % trigger.groups()
data = web.get(url)
bug = etree.fromstring(data).find("bug")
message = (
"[BUGZILLA] %s | Product: %s | Component: %s | Version: %s | "
+ "Importance: %s | Status: %s | Assigned to: %s | "
+ "Reported: %s | Modified: %s"
)
if bug.find("resolution") is not None:
status = bug.find("bug_status").text + " " + bug.find("resolution").text
else:
status = bug.find("bug_status").text
message = message % (
bug.find("short_desc").text,
bug.find("product").text,
bug.find("component").text,
bug.find("version").text,
(bug.find("priority").text + " " + bug.find("bug_severity").text),
status,
bug.find("assigned_to").text,
bug.find("creation_ts").text,
bug.find("delta_ts").text,
)
willie.say(message)
|
https://github.com/sopel-irc/sopel/issues/247
|
Signature: TypeError: cannot concatenate 'str' and 'NoneType' objects (file "/home/elad/willie/willie/modules/bugzilla.py", line 60, in show_bug)
from #fedora-sound at 2013-05-05 15:08:37.018372:
Message was: <elad661> https://bugzilla.redhat.com/show_bug.cgi?id=951827
Traceback (most recent call last):
File "/home/elad/willie/willie/bot.py", line 420, in call
exit_code = func(willie, trigger)
File "/home/elad/willie/willie/modules/bugzilla.py", line 60, in show_bug
status = bug.find('bug_status').text + ' ' + bug.find('resolution').text
TypeError: cannot concatenate 'str' and 'NoneType' objects
----------------------------------------
|
TypeError
|
def resolve_connection(cls, connection, args, iterable, max_limit=None):
iterable = maybe_queryset(iterable)
if isinstance(iterable, QuerySet):
list_length = iterable.count()
list_slice_length = (
min(max_limit, list_length) if max_limit is not None else list_length
)
else:
list_length = len(iterable)
list_slice_length = (
min(max_limit, list_length) if max_limit is not None else list_length
)
# If after is higher than list_length, connection_from_list_slice
# would try to do a negative slicing which makes django throw an
# AssertionError
after = min(get_offset_with_default(args.get("after"), -1) + 1, list_length)
if max_limit is not None and "first" not in args:
args["first"] = max_limit
connection = connection_from_list_slice(
iterable[after:],
args,
slice_start=after,
list_length=list_length,
list_slice_length=list_slice_length,
connection_type=connection,
edge_type=connection.Edge,
pageinfo_type=PageInfo,
)
connection.iterable = iterable
connection.length = list_length
return connection
|
def resolve_connection(cls, connection, args, iterable, max_limit=None):
iterable = maybe_queryset(iterable)
if isinstance(iterable, QuerySet):
list_length = iterable.count()
list_slice_length = (
min(max_limit, list_length) if max_limit is not None else list_length
)
else:
list_length = len(iterable)
list_slice_length = (
min(max_limit, list_length) if max_limit is not None else list_length
)
after = get_offset_with_default(args.get("after"), -1) + 1
if max_limit is not None and "first" not in args:
args["first"] = max_limit
connection = connection_from_list_slice(
iterable[after:],
args,
slice_start=after,
list_length=list_length,
list_slice_length=list_slice_length,
connection_type=connection,
edge_type=connection.Edge,
pageinfo_type=PageInfo,
)
connection.iterable = iterable
connection.length = list_length
return connection
|
https://github.com/graphql-python/graphene-django/issues/998
|
Traceback (most recent call last):
File "/home/bellini/dev/cliqueimudei/cliqueimudei/backend/cm/urls.py", line 53, in execute_graphql_request
raise error.original_error
File "/home/bellini/.virtualenvs/cm/lib/python3.8/site-packages/promise/promise.py", line 489, in _resolve_from_executor
executor(resolve, reject)
File "/home/bellini/.virtualenvs/cm/lib/python3.8/site-packages/promise/promise.py", line 756, in executor
return resolve(f(*args, **kwargs))
File "/home/bellini/.virtualenvs/cm/lib/python3.8/site-packages/graphql/execution/middleware.py", line 75, in make_it_promise
return next(*args, **kwargs)
File "/home/bellini/.virtualenvs/cm/lib/python3.8/site-packages/graphene_django/fields.py", line 216, in connection_resolver
return on_resolve(iterable)
File "/home/bellini/.virtualenvs/cm/lib/python3.8/site-packages/graphene_django/fields.py", line 153, in resolve_connection
connection = connection_from_list_slice(
File "/home/bellini/.virtualenvs/cm/lib/python3.8/site-packages/graphql_relay/connection/arrayconnection.py", line 82, in connection_from_list_slice
_slice = list_slice[
File "/home/bellini/.virtualenvs/cm/lib/python3.8/site-packages/django/db/models/query.py", line 290, in __getitem__
assert ((not isinstance(k, slice) and (k >= 0)) or
AssertionError: Negative indexing is not supported.
|
AssertionError
|
def convert_django_field_with_choices(
field, registry=None, convert_choices_to_enum=True
):
if registry is not None:
converted = registry.get_converted_field(field)
if converted:
return converted
choices = getattr(field, "choices", None)
if choices and convert_choices_to_enum:
enum = convert_choice_field_to_enum(field)
required = not (field.blank or field.null)
converted = enum(
description=get_django_field_description(field), required=required
)
else:
converted = convert_django_field(field, registry)
if registry is not None:
registry.register_converted_field(field, converted)
return converted
|
def convert_django_field_with_choices(
field, registry=None, convert_choices_to_enum=True
):
if registry is not None:
converted = registry.get_converted_field(field)
if converted:
return converted
choices = getattr(field, "choices", None)
if choices and convert_choices_to_enum:
enum = convert_choice_field_to_enum(field)
required = not (field.blank or field.null)
converted = enum(description=field.help_text, required=required)
else:
converted = convert_django_field(field, registry)
if registry is not None:
registry.register_converted_field(field, converted)
return converted
|
https://github.com/graphql-python/graphene-django/issues/975
|
Traceback (most recent call last):
File "./manage.py", line 21, in <module>
main()
File "./manage.py", line 17, in main
execute_from_command_line(sys.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 395, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 328, in run_from_argv
self.execute(*args, **cmd_options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 369, in execute
output = self.handle(*args, **options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 113, in handle
self.get_schema(schema, out, indent)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 70, in get_schema
self.save_graphql_file(out, schema)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 59, in save_graphql_file
outfile.write(print_schema(schema.graphql_schema))
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 39, in print_schema
return print_filtered_schema(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 61, in print_filtered_schema
"\n\n".join(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 65, in <genexpr>
(print_type(type_) for type_ in types),
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 128, in print_type
return print_object(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 162, in print_object
+ print_fields(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 198, in print_fields
fields = [
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 199, in <listcomp>
print_description(field, " ", not i)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 282, in print_description
block_string = print_block_string(description, "", prefer_multiple_lines)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/language/block_string.py", line 91, in print_block_string
result += value.replace("\n", "\n" + indentation) if indentation else value
TypeError: can only concatenate str (not "__proxy__") to str
|
TypeError
|
def convert_field_to_string(field, registry=None):
return String(
description=get_django_field_description(field), required=not field.null
)
|
def convert_field_to_string(field, registry=None):
return String(description=field.help_text, required=not field.null)
|
https://github.com/graphql-python/graphene-django/issues/975
|
Traceback (most recent call last):
File "./manage.py", line 21, in <module>
main()
File "./manage.py", line 17, in main
execute_from_command_line(sys.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 395, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 328, in run_from_argv
self.execute(*args, **cmd_options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 369, in execute
output = self.handle(*args, **options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 113, in handle
self.get_schema(schema, out, indent)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 70, in get_schema
self.save_graphql_file(out, schema)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 59, in save_graphql_file
outfile.write(print_schema(schema.graphql_schema))
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 39, in print_schema
return print_filtered_schema(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 61, in print_filtered_schema
"\n\n".join(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 65, in <genexpr>
(print_type(type_) for type_ in types),
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 128, in print_type
return print_object(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 162, in print_object
+ print_fields(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 198, in print_fields
fields = [
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 199, in <listcomp>
print_description(field, " ", not i)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 282, in print_description
block_string = print_block_string(description, "", prefer_multiple_lines)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/language/block_string.py", line 91, in print_block_string
result += value.replace("\n", "\n" + indentation) if indentation else value
TypeError: can only concatenate str (not "__proxy__") to str
|
TypeError
|
def convert_field_to_id(field, registry=None):
return ID(description=get_django_field_description(field), required=not field.null)
|
def convert_field_to_id(field, registry=None):
return ID(description=field.help_text, required=not field.null)
|
https://github.com/graphql-python/graphene-django/issues/975
|
Traceback (most recent call last):
File "./manage.py", line 21, in <module>
main()
File "./manage.py", line 17, in main
execute_from_command_line(sys.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 395, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 328, in run_from_argv
self.execute(*args, **cmd_options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 369, in execute
output = self.handle(*args, **options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 113, in handle
self.get_schema(schema, out, indent)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 70, in get_schema
self.save_graphql_file(out, schema)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 59, in save_graphql_file
outfile.write(print_schema(schema.graphql_schema))
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 39, in print_schema
return print_filtered_schema(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 61, in print_filtered_schema
"\n\n".join(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 65, in <genexpr>
(print_type(type_) for type_ in types),
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 128, in print_type
return print_object(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 162, in print_object
+ print_fields(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 198, in print_fields
fields = [
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 199, in <listcomp>
print_description(field, " ", not i)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 282, in print_description
block_string = print_block_string(description, "", prefer_multiple_lines)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/language/block_string.py", line 91, in print_block_string
result += value.replace("\n", "\n" + indentation) if indentation else value
TypeError: can only concatenate str (not "__proxy__") to str
|
TypeError
|
def convert_field_to_uuid(field, registry=None):
return UUID(
description=get_django_field_description(field), required=not field.null
)
|
def convert_field_to_uuid(field, registry=None):
return UUID(description=field.help_text, required=not field.null)
|
https://github.com/graphql-python/graphene-django/issues/975
|
Traceback (most recent call last):
File "./manage.py", line 21, in <module>
main()
File "./manage.py", line 17, in main
execute_from_command_line(sys.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 395, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 328, in run_from_argv
self.execute(*args, **cmd_options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 369, in execute
output = self.handle(*args, **options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 113, in handle
self.get_schema(schema, out, indent)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 70, in get_schema
self.save_graphql_file(out, schema)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 59, in save_graphql_file
outfile.write(print_schema(schema.graphql_schema))
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 39, in print_schema
return print_filtered_schema(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 61, in print_filtered_schema
"\n\n".join(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 65, in <genexpr>
(print_type(type_) for type_ in types),
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 128, in print_type
return print_object(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 162, in print_object
+ print_fields(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 198, in print_fields
fields = [
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 199, in <listcomp>
print_description(field, " ", not i)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 282, in print_description
block_string = print_block_string(description, "", prefer_multiple_lines)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/language/block_string.py", line 91, in print_block_string
result += value.replace("\n", "\n" + indentation) if indentation else value
TypeError: can only concatenate str (not "__proxy__") to str
|
TypeError
|
def convert_field_to_int(field, registry=None):
return Int(description=get_django_field_description(field), required=not field.null)
|
def convert_field_to_int(field, registry=None):
return Int(description=field.help_text, required=not field.null)
|
https://github.com/graphql-python/graphene-django/issues/975
|
Traceback (most recent call last):
File "./manage.py", line 21, in <module>
main()
File "./manage.py", line 17, in main
execute_from_command_line(sys.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 395, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 328, in run_from_argv
self.execute(*args, **cmd_options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 369, in execute
output = self.handle(*args, **options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 113, in handle
self.get_schema(schema, out, indent)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 70, in get_schema
self.save_graphql_file(out, schema)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 59, in save_graphql_file
outfile.write(print_schema(schema.graphql_schema))
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 39, in print_schema
return print_filtered_schema(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 61, in print_filtered_schema
"\n\n".join(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 65, in <genexpr>
(print_type(type_) for type_ in types),
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 128, in print_type
return print_object(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 162, in print_object
+ print_fields(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 198, in print_fields
fields = [
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 199, in <listcomp>
print_description(field, " ", not i)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 282, in print_description
block_string = print_block_string(description, "", prefer_multiple_lines)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/language/block_string.py", line 91, in print_block_string
result += value.replace("\n", "\n" + indentation) if indentation else value
TypeError: can only concatenate str (not "__proxy__") to str
|
TypeError
|
def convert_field_to_boolean(field, registry=None):
return Boolean(
description=get_django_field_description(field), required=not field.null
)
|
def convert_field_to_boolean(field, registry=None):
return Boolean(description=field.help_text, required=not field.null)
|
https://github.com/graphql-python/graphene-django/issues/975
|
Traceback (most recent call last):
File "./manage.py", line 21, in <module>
main()
File "./manage.py", line 17, in main
execute_from_command_line(sys.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 395, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 328, in run_from_argv
self.execute(*args, **cmd_options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 369, in execute
output = self.handle(*args, **options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 113, in handle
self.get_schema(schema, out, indent)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 70, in get_schema
self.save_graphql_file(out, schema)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 59, in save_graphql_file
outfile.write(print_schema(schema.graphql_schema))
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 39, in print_schema
return print_filtered_schema(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 61, in print_filtered_schema
"\n\n".join(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 65, in <genexpr>
(print_type(type_) for type_ in types),
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 128, in print_type
return print_object(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 162, in print_object
+ print_fields(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 198, in print_fields
fields = [
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 199, in <listcomp>
print_description(field, " ", not i)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 282, in print_description
block_string = print_block_string(description, "", prefer_multiple_lines)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/language/block_string.py", line 91, in print_block_string
result += value.replace("\n", "\n" + indentation) if indentation else value
TypeError: can only concatenate str (not "__proxy__") to str
|
TypeError
|
def convert_field_to_float(field, registry=None):
return Float(
description=get_django_field_description(field), required=not field.null
)
|
def convert_field_to_float(field, registry=None):
return Float(description=field.help_text, required=not field.null)
|
https://github.com/graphql-python/graphene-django/issues/975
|
Traceback (most recent call last):
File "./manage.py", line 21, in <module>
main()
File "./manage.py", line 17, in main
execute_from_command_line(sys.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 395, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 328, in run_from_argv
self.execute(*args, **cmd_options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 369, in execute
output = self.handle(*args, **options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 113, in handle
self.get_schema(schema, out, indent)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 70, in get_schema
self.save_graphql_file(out, schema)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 59, in save_graphql_file
outfile.write(print_schema(schema.graphql_schema))
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 39, in print_schema
return print_filtered_schema(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 61, in print_filtered_schema
"\n\n".join(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 65, in <genexpr>
(print_type(type_) for type_ in types),
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 128, in print_type
return print_object(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 162, in print_object
+ print_fields(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 198, in print_fields
fields = [
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 199, in <listcomp>
print_description(field, " ", not i)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 282, in print_description
block_string = print_block_string(description, "", prefer_multiple_lines)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/language/block_string.py", line 91, in print_block_string
result += value.replace("\n", "\n" + indentation) if indentation else value
TypeError: can only concatenate str (not "__proxy__") to str
|
TypeError
|
def convert_datetime_to_string(field, registry=None):
return DateTime(
description=get_django_field_description(field), required=not field.null
)
|
def convert_datetime_to_string(field, registry=None):
return DateTime(description=field.help_text, required=not field.null)
|
https://github.com/graphql-python/graphene-django/issues/975
|
Traceback (most recent call last):
File "./manage.py", line 21, in <module>
main()
File "./manage.py", line 17, in main
execute_from_command_line(sys.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 395, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 328, in run_from_argv
self.execute(*args, **cmd_options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 369, in execute
output = self.handle(*args, **options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 113, in handle
self.get_schema(schema, out, indent)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 70, in get_schema
self.save_graphql_file(out, schema)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 59, in save_graphql_file
outfile.write(print_schema(schema.graphql_schema))
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 39, in print_schema
return print_filtered_schema(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 61, in print_filtered_schema
"\n\n".join(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 65, in <genexpr>
(print_type(type_) for type_ in types),
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 128, in print_type
return print_object(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 162, in print_object
+ print_fields(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 198, in print_fields
fields = [
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 199, in <listcomp>
print_description(field, " ", not i)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 282, in print_description
block_string = print_block_string(description, "", prefer_multiple_lines)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/language/block_string.py", line 91, in print_block_string
result += value.replace("\n", "\n" + indentation) if indentation else value
TypeError: can only concatenate str (not "__proxy__") to str
|
TypeError
|
def convert_date_to_string(field, registry=None):
return Date(
description=get_django_field_description(field), required=not field.null
)
|
def convert_date_to_string(field, registry=None):
return Date(description=field.help_text, required=not field.null)
|
https://github.com/graphql-python/graphene-django/issues/975
|
Traceback (most recent call last):
File "./manage.py", line 21, in <module>
main()
File "./manage.py", line 17, in main
execute_from_command_line(sys.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 395, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 328, in run_from_argv
self.execute(*args, **cmd_options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 369, in execute
output = self.handle(*args, **options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 113, in handle
self.get_schema(schema, out, indent)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 70, in get_schema
self.save_graphql_file(out, schema)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 59, in save_graphql_file
outfile.write(print_schema(schema.graphql_schema))
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 39, in print_schema
return print_filtered_schema(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 61, in print_filtered_schema
"\n\n".join(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 65, in <genexpr>
(print_type(type_) for type_ in types),
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 128, in print_type
return print_object(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 162, in print_object
+ print_fields(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 198, in print_fields
fields = [
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 199, in <listcomp>
print_description(field, " ", not i)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 282, in print_description
block_string = print_block_string(description, "", prefer_multiple_lines)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/language/block_string.py", line 91, in print_block_string
result += value.replace("\n", "\n" + indentation) if indentation else value
TypeError: can only concatenate str (not "__proxy__") to str
|
TypeError
|
def convert_time_to_string(field, registry=None):
return Time(
description=get_django_field_description(field), required=not field.null
)
|
def convert_time_to_string(field, registry=None):
return Time(description=field.help_text, required=not field.null)
|
https://github.com/graphql-python/graphene-django/issues/975
|
Traceback (most recent call last):
File "./manage.py", line 21, in <module>
main()
File "./manage.py", line 17, in main
execute_from_command_line(sys.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 395, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 328, in run_from_argv
self.execute(*args, **cmd_options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 369, in execute
output = self.handle(*args, **options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 113, in handle
self.get_schema(schema, out, indent)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 70, in get_schema
self.save_graphql_file(out, schema)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 59, in save_graphql_file
outfile.write(print_schema(schema.graphql_schema))
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 39, in print_schema
return print_filtered_schema(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 61, in print_filtered_schema
"\n\n".join(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 65, in <genexpr>
(print_type(type_) for type_ in types),
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 128, in print_type
return print_object(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 162, in print_object
+ print_fields(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 198, in print_fields
fields = [
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 199, in <listcomp>
print_description(field, " ", not i)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 282, in print_description
block_string = print_block_string(description, "", prefer_multiple_lines)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/language/block_string.py", line 91, in print_block_string
result += value.replace("\n", "\n" + indentation) if indentation else value
TypeError: can only concatenate str (not "__proxy__") to str
|
TypeError
|
def convert_field_to_djangomodel(field, registry=None):
model = field.related_model
def dynamic_type():
_type = registry.get_type_for_model(model)
if not _type:
return
return Field(
_type,
description=get_django_field_description(field),
required=not field.null,
)
return Dynamic(dynamic_type)
|
def convert_field_to_djangomodel(field, registry=None):
model = field.related_model
def dynamic_type():
_type = registry.get_type_for_model(model)
if not _type:
return
return Field(_type, description=field.help_text, required=not field.null)
return Dynamic(dynamic_type)
|
https://github.com/graphql-python/graphene-django/issues/975
|
Traceback (most recent call last):
File "./manage.py", line 21, in <module>
main()
File "./manage.py", line 17, in main
execute_from_command_line(sys.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 395, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 328, in run_from_argv
self.execute(*args, **cmd_options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 369, in execute
output = self.handle(*args, **options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 113, in handle
self.get_schema(schema, out, indent)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 70, in get_schema
self.save_graphql_file(out, schema)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 59, in save_graphql_file
outfile.write(print_schema(schema.graphql_schema))
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 39, in print_schema
return print_filtered_schema(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 61, in print_filtered_schema
"\n\n".join(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 65, in <genexpr>
(print_type(type_) for type_ in types),
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 128, in print_type
return print_object(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 162, in print_object
+ print_fields(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 198, in print_fields
fields = [
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 199, in <listcomp>
print_description(field, " ", not i)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 282, in print_description
block_string = print_block_string(description, "", prefer_multiple_lines)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/language/block_string.py", line 91, in print_block_string
result += value.replace("\n", "\n" + indentation) if indentation else value
TypeError: can only concatenate str (not "__proxy__") to str
|
TypeError
|
def dynamic_type():
_type = registry.get_type_for_model(model)
if not _type:
return
return Field(
_type,
description=get_django_field_description(field),
required=not field.null,
)
|
def dynamic_type():
_type = registry.get_type_for_model(model)
if not _type:
return
return Field(_type, description=field.help_text, required=not field.null)
|
https://github.com/graphql-python/graphene-django/issues/975
|
Traceback (most recent call last):
File "./manage.py", line 21, in <module>
main()
File "./manage.py", line 17, in main
execute_from_command_line(sys.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 395, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 328, in run_from_argv
self.execute(*args, **cmd_options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 369, in execute
output = self.handle(*args, **options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 113, in handle
self.get_schema(schema, out, indent)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 70, in get_schema
self.save_graphql_file(out, schema)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 59, in save_graphql_file
outfile.write(print_schema(schema.graphql_schema))
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 39, in print_schema
return print_filtered_schema(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 61, in print_filtered_schema
"\n\n".join(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 65, in <genexpr>
(print_type(type_) for type_ in types),
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 128, in print_type
return print_object(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 162, in print_object
+ print_fields(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 198, in print_fields
fields = [
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 199, in <listcomp>
print_description(field, " ", not i)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 282, in print_description
block_string = print_block_string(description, "", prefer_multiple_lines)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/language/block_string.py", line 91, in print_block_string
result += value.replace("\n", "\n" + indentation) if indentation else value
TypeError: can only concatenate str (not "__proxy__") to str
|
TypeError
|
def convert_postgres_array_to_list(field, registry=None):
base_type = convert_django_field(field.base_field)
if not isinstance(base_type, (List, NonNull)):
base_type = type(base_type)
return List(
base_type,
description=get_django_field_description(field),
required=not field.null,
)
|
def convert_postgres_array_to_list(field, registry=None):
base_type = convert_django_field(field.base_field)
if not isinstance(base_type, (List, NonNull)):
base_type = type(base_type)
return List(base_type, description=field.help_text, required=not field.null)
|
https://github.com/graphql-python/graphene-django/issues/975
|
Traceback (most recent call last):
File "./manage.py", line 21, in <module>
main()
File "./manage.py", line 17, in main
execute_from_command_line(sys.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 395, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 328, in run_from_argv
self.execute(*args, **cmd_options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 369, in execute
output = self.handle(*args, **options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 113, in handle
self.get_schema(schema, out, indent)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 70, in get_schema
self.save_graphql_file(out, schema)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 59, in save_graphql_file
outfile.write(print_schema(schema.graphql_schema))
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 39, in print_schema
return print_filtered_schema(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 61, in print_filtered_schema
"\n\n".join(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 65, in <genexpr>
(print_type(type_) for type_ in types),
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 128, in print_type
return print_object(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 162, in print_object
+ print_fields(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 198, in print_fields
fields = [
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 199, in <listcomp>
print_description(field, " ", not i)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 282, in print_description
block_string = print_block_string(description, "", prefer_multiple_lines)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/language/block_string.py", line 91, in print_block_string
result += value.replace("\n", "\n" + indentation) if indentation else value
TypeError: can only concatenate str (not "__proxy__") to str
|
TypeError
|
def convert_postgres_field_to_string(field, registry=None):
return JSONString(
description=get_django_field_description(field), required=not field.null
)
|
def convert_postgres_field_to_string(field, registry=None):
return JSONString(description=field.help_text, required=not field.null)
|
https://github.com/graphql-python/graphene-django/issues/975
|
Traceback (most recent call last):
File "./manage.py", line 21, in <module>
main()
File "./manage.py", line 17, in main
execute_from_command_line(sys.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 395, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 328, in run_from_argv
self.execute(*args, **cmd_options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 369, in execute
output = self.handle(*args, **options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 113, in handle
self.get_schema(schema, out, indent)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 70, in get_schema
self.save_graphql_file(out, schema)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 59, in save_graphql_file
outfile.write(print_schema(schema.graphql_schema))
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 39, in print_schema
return print_filtered_schema(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 61, in print_filtered_schema
"\n\n".join(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 65, in <genexpr>
(print_type(type_) for type_ in types),
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 128, in print_type
return print_object(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 162, in print_object
+ print_fields(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 198, in print_fields
fields = [
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 199, in <listcomp>
print_description(field, " ", not i)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 282, in print_description
block_string = print_block_string(description, "", prefer_multiple_lines)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/language/block_string.py", line 91, in print_block_string
result += value.replace("\n", "\n" + indentation) if indentation else value
TypeError: can only concatenate str (not "__proxy__") to str
|
TypeError
|
def convert_postgres_range_to_string(field, registry=None):
inner_type = convert_django_field(field.base_field)
if not isinstance(inner_type, (List, NonNull)):
inner_type = type(inner_type)
return List(
inner_type,
description=get_django_field_description(field),
required=not field.null,
)
|
def convert_postgres_range_to_string(field, registry=None):
inner_type = convert_django_field(field.base_field)
if not isinstance(inner_type, (List, NonNull)):
inner_type = type(inner_type)
return List(inner_type, description=field.help_text, required=not field.null)
|
https://github.com/graphql-python/graphene-django/issues/975
|
Traceback (most recent call last):
File "./manage.py", line 21, in <module>
main()
File "./manage.py", line 17, in main
execute_from_command_line(sys.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 395, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 328, in run_from_argv
self.execute(*args, **cmd_options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 369, in execute
output = self.handle(*args, **options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 113, in handle
self.get_schema(schema, out, indent)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 70, in get_schema
self.save_graphql_file(out, schema)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 59, in save_graphql_file
outfile.write(print_schema(schema.graphql_schema))
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 39, in print_schema
return print_filtered_schema(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 61, in print_filtered_schema
"\n\n".join(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 65, in <genexpr>
(print_type(type_) for type_ in types),
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 128, in print_type
return print_object(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 162, in print_object
+ print_fields(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 198, in print_fields
fields = [
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 199, in <listcomp>
print_description(field, " ", not i)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 282, in print_description
block_string = print_block_string(description, "", prefer_multiple_lines)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/language/block_string.py", line 91, in print_block_string
result += value.replace("\n", "\n" + indentation) if indentation else value
TypeError: can only concatenate str (not "__proxy__") to str
|
TypeError
|
def get_filtering_args_from_filterset(filterset_class, type):
"""Inspect a FilterSet and produce the arguments to pass to
a Graphene Field. These arguments will be available to
filter against in the GraphQL
"""
from ..forms.converter import convert_form_field
args = {}
model = filterset_class._meta.model
for name, filter_field in filterset_class.base_filters.items():
form_field = None
if name in filterset_class.declared_filters:
form_field = filter_field.field
else:
model_field = get_model_field(model, filter_field.field_name)
filter_type = filter_field.lookup_expr
if filter_type != "isnull" and hasattr(model_field, "formfield"):
form_field = model_field.formfield(
required=filter_field.extra.get("required", False)
)
# Fallback to field defined on filter if we can't get it from the
# model field
if not form_field:
form_field = filter_field.field
field_type = convert_form_field(form_field).Argument()
field_type.description = (
None if filter_field.label is None else str(filter_field.label)
)
args[name] = field_type
return args
|
def get_filtering_args_from_filterset(filterset_class, type):
"""Inspect a FilterSet and produce the arguments to pass to
a Graphene Field. These arguments will be available to
filter against in the GraphQL
"""
from ..forms.converter import convert_form_field
args = {}
model = filterset_class._meta.model
for name, filter_field in filterset_class.base_filters.items():
form_field = None
if name in filterset_class.declared_filters:
form_field = filter_field.field
else:
model_field = get_model_field(model, filter_field.field_name)
filter_type = filter_field.lookup_expr
if filter_type != "isnull" and hasattr(model_field, "formfield"):
form_field = model_field.formfield(
required=filter_field.extra.get("required", False)
)
# Fallback to field defined on filter if we can't get it from the
# model field
if not form_field:
form_field = filter_field.field
field_type = convert_form_field(form_field).Argument()
field_type.description = filter_field.label
args[name] = field_type
return args
|
https://github.com/graphql-python/graphene-django/issues/975
|
Traceback (most recent call last):
File "./manage.py", line 21, in <module>
main()
File "./manage.py", line 17, in main
execute_from_command_line(sys.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 395, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 328, in run_from_argv
self.execute(*args, **cmd_options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 369, in execute
output = self.handle(*args, **options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 113, in handle
self.get_schema(schema, out, indent)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 70, in get_schema
self.save_graphql_file(out, schema)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 59, in save_graphql_file
outfile.write(print_schema(schema.graphql_schema))
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 39, in print_schema
return print_filtered_schema(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 61, in print_filtered_schema
"\n\n".join(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 65, in <genexpr>
(print_type(type_) for type_ in types),
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 128, in print_type
return print_object(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 162, in print_object
+ print_fields(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 198, in print_fields
fields = [
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 199, in <listcomp>
print_description(field, " ", not i)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 282, in print_description
block_string = print_block_string(description, "", prefer_multiple_lines)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/language/block_string.py", line 91, in print_block_string
result += value.replace("\n", "\n" + indentation) if indentation else value
TypeError: can only concatenate str (not "__proxy__") to str
|
TypeError
|
def convert_form_field_to_string(field):
return String(
description=get_form_field_description(field), required=field.required
)
|
def convert_form_field_to_string(field):
return String(description=field.help_text, required=field.required)
|
https://github.com/graphql-python/graphene-django/issues/975
|
Traceback (most recent call last):
File "./manage.py", line 21, in <module>
main()
File "./manage.py", line 17, in main
execute_from_command_line(sys.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/__init__.py", line 395, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 328, in run_from_argv
self.execute(*args, **cmd_options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/django/core/management/base.py", line 369, in execute
output = self.handle(*args, **options)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 113, in handle
self.get_schema(schema, out, indent)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 70, in get_schema
self.save_graphql_file(out, schema)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphene_django/management/commands/graphql_schema.py", line 59, in save_graphql_file
outfile.write(print_schema(schema.graphql_schema))
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 39, in print_schema
return print_filtered_schema(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 61, in print_filtered_schema
"\n\n".join(
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 65, in <genexpr>
(print_type(type_) for type_ in types),
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 128, in print_type
return print_object(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 162, in print_object
+ print_fields(type_)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 198, in print_fields
fields = [
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 199, in <listcomp>
print_description(field, " ", not i)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/utilities/print_schema.py", line 282, in print_description
block_string = print_block_string(description, "", prefer_multiple_lines)
File "/home/username/.virtualenvs/project/lib/python3.8/site-packages/graphql/language/block_string.py", line 91, in print_block_string
result += value.replace("\n", "\n" + indentation) if indentation else value
TypeError: can only concatenate str (not "__proxy__") to str
|
TypeError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.