after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def bitcast_convert_type(operand, new_dtype):
new_dtype = xla_bridge.canonicalize_dtype(new_dtype)
old_dtype = _dtype(operand)
if old_dtype != new_dtype:
return bitcast_convert_type_p.bind(operand, new_dtype=new_dtype)
else:
return operand
|
def bitcast_convert_type(operand, new_dtype):
return bitcast_convert_type_p.bind(operand, new_dtype=new_dtype)
|
https://github.com/google/jax/issues/108
|
import jax.numpy as np
from jax import vmap
vmap(np.any)(np.array([[True, False], [False, False]]))
jax/lib/xla_bridge.py:138: UserWarning: No GPU found, falling back to CPU.
warnings.warn('No GPU found, falling back to CPU.')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "jax/api.py", line 149, in batched_fun
out_flat = batching.batch(flat_fun, in_flat, in_axes_, out_axes)
File "jax/interpreters/batching.py", line 43, in batch
out_val, out_dim = batch_transform(fun).call_wrapped(in_vals, in_dims)
File "jax/linear_util.py", line 85, in call_wrapped
ans = self.f(*args, **self.kwargs)
File "jax/numpy/lax_numpy.py", line 607, in reduction
result = lax.reduce(a, _reduction_init_val(a, init_val), op, dims)
File "jax/lax.py", line 260, in reduce
dimensions=tuple(dimensions))
File "jax/core.py", line 74, in bind
out_tracer = top_trace.process_primitive(self, tracers, kwargs)
File "jax/interpreters/batching.py", line 119, in process_primitive
val_out, dim_out = batched_primitive(vals_in, dims_in, **params)
TypeError: reducer_batcher() takes exactly 4 arguments (3 given)
|
TypeError
|
def reduce(operand, init_value, computation, dimensions):
monoid_reducer = _get_monoid_reducer(computation, init_value)
if monoid_reducer:
return monoid_reducer(operand, dimensions)
else:
jaxpr, consts = _reduction_jaxpr(computation, init_value)
return reduce_p.bind(
operand,
init_value,
computation=computation,
jaxpr=jaxpr,
consts=consts,
dimensions=tuple(dimensions),
)
|
def reduce(operand, init_value, computation, dimensions):
monoid_reducer = _get_monoid_reducer(computation, init_value)
if monoid_reducer:
return monoid_reducer(operand, dimensions)
else:
jaxpr, consts = _reduction_jaxpr(computation, init_value)
return reduce_p.bind(
operand,
init_value,
jaxpr=jaxpr,
consts=consts,
dimensions=tuple(dimensions),
)
|
https://github.com/google/jax/issues/108
|
import jax.numpy as np
from jax import vmap
vmap(np.any)(np.array([[True, False], [False, False]]))
jax/lib/xla_bridge.py:138: UserWarning: No GPU found, falling back to CPU.
warnings.warn('No GPU found, falling back to CPU.')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "jax/api.py", line 149, in batched_fun
out_flat = batching.batch(flat_fun, in_flat, in_axes_, out_axes)
File "jax/interpreters/batching.py", line 43, in batch
out_val, out_dim = batch_transform(fun).call_wrapped(in_vals, in_dims)
File "jax/linear_util.py", line 85, in call_wrapped
ans = self.f(*args, **self.kwargs)
File "jax/numpy/lax_numpy.py", line 607, in reduction
result = lax.reduce(a, _reduction_init_val(a, init_val), op, dims)
File "jax/lax.py", line 260, in reduce
dimensions=tuple(dimensions))
File "jax/core.py", line 74, in bind
out_tracer = top_trace.process_primitive(self, tracers, kwargs)
File "jax/interpreters/batching.py", line 119, in process_primitive
val_out, dim_out = batched_primitive(vals_in, dims_in, **params)
TypeError: reducer_batcher() takes exactly 4 arguments (3 given)
|
TypeError
|
def _get_monoid_reducer(monoid_op, x):
aval = core.get_aval(x)
if (type(aval) is ConcreteArray) and aval.shape == ():
if monoid_op is add:
return aval.val == 0 and _reduce_sum
elif monoid_op is max:
return aval.val == _get_max_identity(aval.dtype) and _reduce_max
elif monoid_op is min:
return aval.val == _get_min_identity(aval.dtype) and _reduce_min
elif monoid_op is bitwise_or and aval.dtype == onp.bool_:
return aval.val == _get_max_identity(aval.dtype) and _reduce_or
elif monoid_op is bitwise_and and aval.dtype == onp.bool_:
return aval.val == _get_min_identity(aval.dtype) and _reduce_and
|
def _get_monoid_reducer(monoid_op, x):
aval = core.get_aval(x)
if (type(aval) is ConcreteArray) and aval.shape == ():
if monoid_op is add:
return aval.val == 0 and _reduce_sum
elif monoid_op is max:
return aval.val == _get_max_identity(aval.dtype) and _reduce_max
elif monoid_op is min:
return aval.val == _get_min_identity(aval.dtype) and _reduce_min
|
https://github.com/google/jax/issues/108
|
import jax.numpy as np
from jax import vmap
vmap(np.any)(np.array([[True, False], [False, False]]))
jax/lib/xla_bridge.py:138: UserWarning: No GPU found, falling back to CPU.
warnings.warn('No GPU found, falling back to CPU.')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "jax/api.py", line 149, in batched_fun
out_flat = batching.batch(flat_fun, in_flat, in_axes_, out_axes)
File "jax/interpreters/batching.py", line 43, in batch
out_val, out_dim = batch_transform(fun).call_wrapped(in_vals, in_dims)
File "jax/linear_util.py", line 85, in call_wrapped
ans = self.f(*args, **self.kwargs)
File "jax/numpy/lax_numpy.py", line 607, in reduction
result = lax.reduce(a, _reduction_init_val(a, init_val), op, dims)
File "jax/lax.py", line 260, in reduce
dimensions=tuple(dimensions))
File "jax/core.py", line 74, in bind
out_tracer = top_trace.process_primitive(self, tracers, kwargs)
File "jax/interpreters/batching.py", line 119, in process_primitive
val_out, dim_out = batched_primitive(vals_in, dims_in, **params)
TypeError: reducer_batcher() takes exactly 4 arguments (3 given)
|
TypeError
|
def _get_max_identity(dtype):
if onp.issubdtype(dtype, onp.floating):
return onp.array(-onp.inf, dtype)
elif onp.issubdtype(dtype, onp.integer):
return onp.array(onp.iinfo(dtype).min, dtype)
elif onp.issubdtype(dtype, onp.bool_):
return onp.array(False, onp.bool_)
|
def _get_max_identity(dtype):
if onp.issubdtype(dtype, onp.floating):
return onp.array(-onp.inf, dtype)
elif onp.issubdtype(dtype, onp.integer):
return onp.array(onp.iinfo(dtype).min, dtype)
|
https://github.com/google/jax/issues/108
|
import jax.numpy as np
from jax import vmap
vmap(np.any)(np.array([[True, False], [False, False]]))
jax/lib/xla_bridge.py:138: UserWarning: No GPU found, falling back to CPU.
warnings.warn('No GPU found, falling back to CPU.')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "jax/api.py", line 149, in batched_fun
out_flat = batching.batch(flat_fun, in_flat, in_axes_, out_axes)
File "jax/interpreters/batching.py", line 43, in batch
out_val, out_dim = batch_transform(fun).call_wrapped(in_vals, in_dims)
File "jax/linear_util.py", line 85, in call_wrapped
ans = self.f(*args, **self.kwargs)
File "jax/numpy/lax_numpy.py", line 607, in reduction
result = lax.reduce(a, _reduction_init_val(a, init_val), op, dims)
File "jax/lax.py", line 260, in reduce
dimensions=tuple(dimensions))
File "jax/core.py", line 74, in bind
out_tracer = top_trace.process_primitive(self, tracers, kwargs)
File "jax/interpreters/batching.py", line 119, in process_primitive
val_out, dim_out = batched_primitive(vals_in, dims_in, **params)
TypeError: reducer_batcher() takes exactly 4 arguments (3 given)
|
TypeError
|
def _get_min_identity(dtype):
if onp.issubdtype(dtype, onp.floating):
return onp.array(onp.inf, dtype)
elif onp.issubdtype(dtype, onp.integer):
return onp.array(onp.iinfo(dtype).max, dtype)
elif onp.issubdtype(dtype, onp.bool_):
return onp.array(True, onp.bool_)
|
def _get_min_identity(dtype):
if onp.issubdtype(dtype, onp.floating):
return onp.array(onp.inf, dtype)
elif onp.issubdtype(dtype, onp.integer):
return onp.array(onp.iinfo(dtype).max, dtype)
|
https://github.com/google/jax/issues/108
|
import jax.numpy as np
from jax import vmap
vmap(np.any)(np.array([[True, False], [False, False]]))
jax/lib/xla_bridge.py:138: UserWarning: No GPU found, falling back to CPU.
warnings.warn('No GPU found, falling back to CPU.')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "jax/api.py", line 149, in batched_fun
out_flat = batching.batch(flat_fun, in_flat, in_axes_, out_axes)
File "jax/interpreters/batching.py", line 43, in batch
out_val, out_dim = batch_transform(fun).call_wrapped(in_vals, in_dims)
File "jax/linear_util.py", line 85, in call_wrapped
ans = self.f(*args, **self.kwargs)
File "jax/numpy/lax_numpy.py", line 607, in reduction
result = lax.reduce(a, _reduction_init_val(a, init_val), op, dims)
File "jax/lax.py", line 260, in reduce
dimensions=tuple(dimensions))
File "jax/core.py", line 74, in bind
out_tracer = top_trace.process_primitive(self, tracers, kwargs)
File "jax/interpreters/batching.py", line 119, in process_primitive
val_out, dim_out = batched_primitive(vals_in, dims_in, **params)
TypeError: reducer_batcher() takes exactly 4 arguments (3 given)
|
TypeError
|
def pad_batch_rule(batched_args, batch_dims, padding_config):
operand, padding_value = batched_args
operand_bdim, padding_value_bdim = batch_dims
if padding_value_bdim is None:
assert operand_bdim is not None
padding_config = list(padding_config)
padding_config.insert(operand_bdim, (0, 0, 0))
return pad(operand, padding_value, padding_config), operand_bdim
else:
raise NotImplementedError # loop and stack
|
def pad_batch_rule(batched_args, batch_dims, padding_config):
operand, padding_value = batched_args
operand_bdim, padding_value_bdim = batch_dims
if padding_value_bdim is None:
assert operand_bdim is not None
padding_config = list(padding_config)
padding_config.insert(operand_bdim, (0, 0, 0))
return pad(operand, padding_value, padding_config), operand_bdim
else:
raise NotImplementedError
|
https://github.com/google/jax/issues/108
|
import jax.numpy as np
from jax import vmap
vmap(np.any)(np.array([[True, False], [False, False]]))
jax/lib/xla_bridge.py:138: UserWarning: No GPU found, falling back to CPU.
warnings.warn('No GPU found, falling back to CPU.')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "jax/api.py", line 149, in batched_fun
out_flat = batching.batch(flat_fun, in_flat, in_axes_, out_axes)
File "jax/interpreters/batching.py", line 43, in batch
out_val, out_dim = batch_transform(fun).call_wrapped(in_vals, in_dims)
File "jax/linear_util.py", line 85, in call_wrapped
ans = self.f(*args, **self.kwargs)
File "jax/numpy/lax_numpy.py", line 607, in reduction
result = lax.reduce(a, _reduction_init_val(a, init_val), op, dims)
File "jax/lax.py", line 260, in reduce
dimensions=tuple(dimensions))
File "jax/core.py", line 74, in bind
out_tracer = top_trace.process_primitive(self, tracers, kwargs)
File "jax/interpreters/batching.py", line 119, in process_primitive
val_out, dim_out = batched_primitive(vals_in, dims_in, **params)
TypeError: reducer_batcher() takes exactly 4 arguments (3 given)
|
TypeError
|
def reduce_shape_rule(operand, init_value, computation, jaxpr, consts, dimensions):
return tuple(onp.delete(operand.shape, dimensions))
|
def reduce_shape_rule(operand, init_value, jaxpr, consts, dimensions):
return tuple(onp.delete(operand.shape, dimensions))
|
https://github.com/google/jax/issues/108
|
import jax.numpy as np
from jax import vmap
vmap(np.any)(np.array([[True, False], [False, False]]))
jax/lib/xla_bridge.py:138: UserWarning: No GPU found, falling back to CPU.
warnings.warn('No GPU found, falling back to CPU.')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "jax/api.py", line 149, in batched_fun
out_flat = batching.batch(flat_fun, in_flat, in_axes_, out_axes)
File "jax/interpreters/batching.py", line 43, in batch
out_val, out_dim = batch_transform(fun).call_wrapped(in_vals, in_dims)
File "jax/linear_util.py", line 85, in call_wrapped
ans = self.f(*args, **self.kwargs)
File "jax/numpy/lax_numpy.py", line 607, in reduction
result = lax.reduce(a, _reduction_init_val(a, init_val), op, dims)
File "jax/lax.py", line 260, in reduce
dimensions=tuple(dimensions))
File "jax/core.py", line 74, in bind
out_tracer = top_trace.process_primitive(self, tracers, kwargs)
File "jax/interpreters/batching.py", line 119, in process_primitive
val_out, dim_out = batched_primitive(vals_in, dims_in, **params)
TypeError: reducer_batcher() takes exactly 4 arguments (3 given)
|
TypeError
|
def reduce_translation_rule(
c, operand, init_value, computation, jaxpr, consts, dimensions
):
xla_computation = _reduction_computation(c, jaxpr, consts, init_value)
return c.Reduce(operand, init_value, xla_computation, dimensions)
|
def reduce_translation_rule(c, operand, init_value, jaxpr, consts, dimensions):
xla_computation = _reduction_computation(c, jaxpr, consts, init_value)
return c.Reduce(operand, init_value, xla_computation, dimensions)
|
https://github.com/google/jax/issues/108
|
import jax.numpy as np
from jax import vmap
vmap(np.any)(np.array([[True, False], [False, False]]))
jax/lib/xla_bridge.py:138: UserWarning: No GPU found, falling back to CPU.
warnings.warn('No GPU found, falling back to CPU.')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "jax/api.py", line 149, in batched_fun
out_flat = batching.batch(flat_fun, in_flat, in_axes_, out_axes)
File "jax/interpreters/batching.py", line 43, in batch
out_val, out_dim = batch_transform(fun).call_wrapped(in_vals, in_dims)
File "jax/linear_util.py", line 85, in call_wrapped
ans = self.f(*args, **self.kwargs)
File "jax/numpy/lax_numpy.py", line 607, in reduction
result = lax.reduce(a, _reduction_init_val(a, init_val), op, dims)
File "jax/lax.py", line 260, in reduce
dimensions=tuple(dimensions))
File "jax/core.py", line 74, in bind
out_tracer = top_trace.process_primitive(self, tracers, kwargs)
File "jax/interpreters/batching.py", line 119, in process_primitive
val_out, dim_out = batched_primitive(vals_in, dims_in, **params)
TypeError: reducer_batcher() takes exactly 4 arguments (3 given)
|
TypeError
|
def _make_reduction(np_fun, op, init_val, preproc=None):
"""Creates reduction function given a binary operation and monoid identity."""
@_wraps(np_fun)
def reduction(a, axis=None, dtype=None, out=None, keepdims=False):
if out is not None:
raise ValueError("reduction does not support `out` argument.")
a = a if isinstance(a, ndarray) else asarray(a)
a = preproc(a) if preproc else a
dims = _reduction_dims(a, axis)
result_dtype = _dtype(np_fun(onp.ones((), dtype=dtype or _dtype(a))))
if _dtype(a) != result_dtype:
a = lax.convert_element_type(a, result_dtype)
result = lax.reduce(a, _reduction_init_val(a, init_val), op, dims)
if keepdims:
shape_with_singletons = lax.subvals(shape(a), zip(dims, (1,) * len(dims)))
result = lax.reshape(result, shape_with_singletons)
if dtype and onp.dtype(dtype) != onp.dtype(result_dtype):
result = lax.convert_element_type(result, dtype)
return result
return reduction
|
def _make_reduction(np_fun, op, init_val):
"""Creates reduction function given a binary operation and monoid identity."""
@_wraps(op)
def reduction(a, axis=None, dtype=None, out=None, keepdims=False):
if out is not None:
raise ValueError("reduction does not support `out` argument.")
a = a if isinstance(a, ndarray) else asarray(a)
dims = _reduction_dims(a, axis)
result_dtype = _dtype(np_fun(onp.ones((), dtype=dtype or _dtype(a))))
if _dtype(a) != result_dtype:
a = lax.convert_element_type(a, result_dtype)
result = lax.reduce(a, _reduction_init_val(a, init_val), op, dims)
if keepdims:
shape_with_singletons = lax.subvals(shape(a), zip(dims, (1,) * len(dims)))
result = lax.reshape(result, shape_with_singletons)
if dtype and onp.dtype(dtype) != onp.dtype(result_dtype):
result = lax.convert_element_type(result, dtype)
return result
return reduction
|
https://github.com/google/jax/issues/108
|
import jax.numpy as np
from jax import vmap
vmap(np.any)(np.array([[True, False], [False, False]]))
jax/lib/xla_bridge.py:138: UserWarning: No GPU found, falling back to CPU.
warnings.warn('No GPU found, falling back to CPU.')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "jax/api.py", line 149, in batched_fun
out_flat = batching.batch(flat_fun, in_flat, in_axes_, out_axes)
File "jax/interpreters/batching.py", line 43, in batch
out_val, out_dim = batch_transform(fun).call_wrapped(in_vals, in_dims)
File "jax/linear_util.py", line 85, in call_wrapped
ans = self.f(*args, **self.kwargs)
File "jax/numpy/lax_numpy.py", line 607, in reduction
result = lax.reduce(a, _reduction_init_val(a, init_val), op, dims)
File "jax/lax.py", line 260, in reduce
dimensions=tuple(dimensions))
File "jax/core.py", line 74, in bind
out_tracer = top_trace.process_primitive(self, tracers, kwargs)
File "jax/interpreters/batching.py", line 119, in process_primitive
val_out, dim_out = batched_primitive(vals_in, dims_in, **params)
TypeError: reducer_batcher() takes exactly 4 arguments (3 given)
|
TypeError
|
def reduction(a, axis=None, dtype=None, out=None, keepdims=False):
if out is not None:
raise ValueError("reduction does not support `out` argument.")
a = a if isinstance(a, ndarray) else asarray(a)
a = preproc(a) if preproc else a
dims = _reduction_dims(a, axis)
result_dtype = _dtype(np_fun(onp.ones((), dtype=dtype or _dtype(a))))
if _dtype(a) != result_dtype:
a = lax.convert_element_type(a, result_dtype)
result = lax.reduce(a, _reduction_init_val(a, init_val), op, dims)
if keepdims:
shape_with_singletons = lax.subvals(shape(a), zip(dims, (1,) * len(dims)))
result = lax.reshape(result, shape_with_singletons)
if dtype and onp.dtype(dtype) != onp.dtype(result_dtype):
result = lax.convert_element_type(result, dtype)
return result
|
def reduction(a, axis=None, dtype=None, out=None, keepdims=False):
if out is not None:
raise ValueError("reduction does not support `out` argument.")
a = a if isinstance(a, ndarray) else asarray(a)
dims = _reduction_dims(a, axis)
result_dtype = _dtype(np_fun(onp.ones((), dtype=dtype or _dtype(a))))
if _dtype(a) != result_dtype:
a = lax.convert_element_type(a, result_dtype)
result = lax.reduce(a, _reduction_init_val(a, init_val), op, dims)
if keepdims:
shape_with_singletons = lax.subvals(shape(a), zip(dims, (1,) * len(dims)))
result = lax.reshape(result, shape_with_singletons)
if dtype and onp.dtype(dtype) != onp.dtype(result_dtype):
result = lax.convert_element_type(result, dtype)
return result
|
https://github.com/google/jax/issues/108
|
import jax.numpy as np
from jax import vmap
vmap(np.any)(np.array([[True, False], [False, False]]))
jax/lib/xla_bridge.py:138: UserWarning: No GPU found, falling back to CPU.
warnings.warn('No GPU found, falling back to CPU.')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "jax/api.py", line 149, in batched_fun
out_flat = batching.batch(flat_fun, in_flat, in_axes_, out_axes)
File "jax/interpreters/batching.py", line 43, in batch
out_val, out_dim = batch_transform(fun).call_wrapped(in_vals, in_dims)
File "jax/linear_util.py", line 85, in call_wrapped
ans = self.f(*args, **self.kwargs)
File "jax/numpy/lax_numpy.py", line 607, in reduction
result = lax.reduce(a, _reduction_init_val(a, init_val), op, dims)
File "jax/lax.py", line 260, in reduce
dimensions=tuple(dimensions))
File "jax/core.py", line 74, in bind
out_tracer = top_trace.process_primitive(self, tracers, kwargs)
File "jax/interpreters/batching.py", line 119, in process_primitive
val_out, dim_out = batched_primitive(vals_in, dims_in, **params)
TypeError: reducer_batcher() takes exactly 4 arguments (3 given)
|
TypeError
|
def template_settings(
es_version, ecs_version, mappings_section, template_settings_file
):
if template_settings_file:
with open(template_settings_file) as f:
template = json.load(f)
else:
template = default_template_settings(ecs_version)
if es_version == 6:
mappings_section = copy.deepcopy(mappings_section)
es6_type_fallback(mappings_section["properties"])
# error.stack_trace needs special handling to set
# index: false and doc_values: false if the field
# is present in the mappings
try:
error_stack_trace_mappings = mappings_section["properties"]["error"][
"properties"
]["stack_trace"]
error_stack_trace_mappings.setdefault("index", False)
error_stack_trace_mappings.setdefault("doc_values", False)
except KeyError:
pass
template["mappings"] = {"_doc": mappings_section}
else:
template["mappings"] = mappings_section
# _meta can't be at template root in legacy templates, so moving back to mappings section
# if present
if "_meta" in template:
mappings_section["_meta"] = template.pop("_meta")
return template
|
def template_settings(
es_version, ecs_version, mappings_section, template_settings_file
):
if template_settings_file:
with open(template_settings_file) as f:
template = json.load(f)
else:
template = default_template_settings(ecs_version)
if es_version == 6:
mappings_section = copy.deepcopy(mappings_section)
es6_type_fallback(mappings_section["properties"])
# error.stack_trace needs special handling to set
# index: false and doc_values: false
error_stack_trace_mappings = mappings_section["properties"]["error"][
"properties"
]["stack_trace"]
error_stack_trace_mappings.setdefault("index", False)
error_stack_trace_mappings.setdefault("doc_values", False)
template["mappings"] = {"_doc": mappings_section}
else:
template["mappings"] = mappings_section
# _meta can't be at template root in legacy templates, so moving back to mappings section
mappings_section["_meta"] = template.pop("_meta")
return template
|
https://github.com/elastic/ecs/issues/1190
|
Loading schemas from git ref v1.6.0
Running generator. ECS version 1.6.0
Loading user defined schemas: ['usage-example/fields/custom/']
/Users/florent/Sources/ecs/scripts/schema/cleaner.py:185: UserWarning: Example value for field `header_flags` contains an object or array which must be quoted to avoid YAML interpretation.
This will cause an exception when running in strict mode.
Warning check:
check_example_value(field, strict=strict_mode)
/Users/florent/Sources/ecs/scripts/schema/cleaner.py:185: UserWarning: Example value for field `resolved_ip` contains an object or array which must be quoted to avoid YAML interpretation.
[...]
Traceback (most recent call last):
File "/Users/florent/Sources/ecs/scripts/generator.py", line 106, in <module>
main()
File "/Users/florent/Sources/ecs/scripts/generator.py", line 60, in main
es_template.generate_legacy(flat, ecs_version, out_dir, args.template_settings, args.mapping_settings)
File "/Users/florent/Sources/ecs/scripts/generators/es_template.py", line 109, in generate_legacy
generate_legacy_template_version(6, ecs_version, mappings_section, out_dir, template_settings_file)
File "/Users/florent/Sources/ecs/scripts/generators/es_template.py", line 115, in generate_legacy_template_version
template = template_settings(es_version, ecs_version, mappings_section, template_settings_file)
File "/Users/florent/Sources/ecs/scripts/generators/es_template.py", line 202, in template_settings
error_stack_trace_mappings = mappings_section['properties']['error']['properties']['stack_trace']
KeyError: 'error'
|
KeyError
|
def main():
args = argument_parser()
ecs_version = read_version(args.ref)
print("Running generator. ECS version " + ecs_version)
# default location to save files
out_dir = "generated"
docs_dir = "docs"
if args.out:
default_dirs = False
out_dir = os.path.join(args.out, out_dir)
docs_dir = os.path.join(args.out, docs_dir)
else:
default_dirs = True
ecs_helpers.make_dirs(out_dir)
ecs_helpers.make_dirs(docs_dir)
# To debug issues in the gradual building up of the nested structure, insert
# statements like this after any step of interest.
# ecs_helpers.yaml_dump('ecs.yml', fields)
fields = loader.load_schemas(ref=args.ref, included_files=args.include)
cleaner.clean(fields, strict=args.strict)
finalizer.finalize(fields)
fields = subset_filter.filter(fields, args.subset, out_dir)
nested, flat = intermediate_files.generate(
fields, os.path.join(out_dir, "ecs"), default_dirs
)
if args.intermediate_only:
exit()
csv_generator.generate(flat, ecs_version, out_dir)
es_template.generate(
flat, ecs_version, out_dir, args.template_settings, args.mapping_settings
)
beats.generate(nested, ecs_version, out_dir)
if args.include or args.subset:
exit()
asciidoc_fields.generate(nested, ecs_version, docs_dir)
|
def main():
args = argument_parser()
ecs_version = read_version(args.ref)
print("Running generator. ECS version " + ecs_version)
# default location to save files
out_dir = "generated"
docs_dir = "docs"
if args.out:
default_dirs = False
out_dir = os.path.join(args.out, out_dir)
docs_dir = os.path.join(args.out, docs_dir)
else:
default_dirs = True
ecs_helpers.make_dirs(out_dir)
ecs_helpers.make_dirs(docs_dir)
# To debug issues in the gradual building up of the nested structure, insert
# statements like this after any step of interest.
# ecs_helpers.yaml_dump('ecs.yml', fields)
fields = loader.load_schemas(ref=args.ref, included_files=args.include)
cleaner.clean(fields)
finalizer.finalize(fields)
fields = subset_filter.filter(fields, args.subset, out_dir)
nested, flat = intermediate_files.generate(
fields, os.path.join(out_dir, "ecs"), default_dirs
)
if args.intermediate_only:
exit()
csv_generator.generate(flat, ecs_version, out_dir)
es_template.generate(
flat, ecs_version, out_dir, args.template_settings, args.mapping_settings
)
beats.generate(nested, ecs_version, out_dir)
if args.include or args.subset:
exit()
asciidoc_fields.generate(nested, ecs_version, docs_dir)
|
https://github.com/elastic/ecs/issues/892
|
$ python scripts/generator.py --ref v1.5.0
Loading schemas from git ref v1.5.0
Running generator. ECS version 1.5.0
Traceback (most recent call last):
File "scripts/generator.py", line 94, in <module>
main()
File "scripts/generator.py", line 44, in main
cleaner.clean(fields)
File "<redacted>/ecs/scripts/schema/cleaner.py", line 23, in clean
visitor.visit_fields(fields, fieldset_func=schema_cleanup, field_func=field_cleanup)
File "<redacted>/ecs/scripts/schema/visitor.py", line 21, in visit_fields
visit_fields(details['fields'],
File "<redacted>/ecs/scripts/schema/visitor.py", line 19, in visit_fields
field_func(details)
File "<redacted>/ecs/scripts/schema/cleaner.py", line 125, in field_cleanup
field_assertions_and_warnings(field)
File "<redacted>/ecs/scripts/schema/cleaner.py", line 168, in field_assertions_and_warnings
single_line_short_description(field)
File "<redacted>/ecs/scripts/schema/cleaner.py", line 189, in single_line_short_description
raise ValueError(msg)
ValueError: Short descriptions must be single line, and under 120 characters (current length: 134).
Offending field or field set: number
Short description:
Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet.
|
ValueError
|
def argument_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"--intermediate-only",
action="store_true",
help="generate intermediary files only",
)
parser.add_argument(
"--include",
nargs="+",
help="include user specified directory of custom field definitions",
)
parser.add_argument("--subset", nargs="+", help="render a subset of the schema")
parser.add_argument(
"--out", action="store", help="directory to store the generated files"
)
parser.add_argument(
"--ref", action="store", help="git reference to use when building schemas"
)
parser.add_argument(
"--template-settings",
action="store",
help="index template settings to use when generating elasticsearch template",
)
parser.add_argument(
"--mapping-settings",
action="store",
help="mapping settings to use when generating elasticsearch template",
)
parser.add_argument(
"--strict",
action="store_true",
help="enforce stricter checking at schema cleanup",
)
args = parser.parse_args()
# Clean up empty include of the Makefile
if args.include and [""] == args.include:
args.include.clear()
return args
|
def argument_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"--intermediate-only",
action="store_true",
help="generate intermediary files only",
)
parser.add_argument(
"--include",
nargs="+",
help="include user specified directory of custom field definitions",
)
parser.add_argument("--subset", nargs="+", help="render a subset of the schema")
parser.add_argument(
"--out", action="store", help="directory to store the generated files"
)
parser.add_argument(
"--ref", action="store", help="git reference to use when building schemas"
)
parser.add_argument(
"--template-settings",
action="store",
help="index template settings to use when generating elasticsearch template",
)
parser.add_argument(
"--mapping-settings",
action="store",
help="mapping settings to use when generating elasticsearch template",
)
args = parser.parse_args()
# Clean up empty include of the Makefile
if args.include and [""] == args.include:
args.include.clear()
return args
|
https://github.com/elastic/ecs/issues/892
|
$ python scripts/generator.py --ref v1.5.0
Loading schemas from git ref v1.5.0
Running generator. ECS version 1.5.0
Traceback (most recent call last):
File "scripts/generator.py", line 94, in <module>
main()
File "scripts/generator.py", line 44, in main
cleaner.clean(fields)
File "<redacted>/ecs/scripts/schema/cleaner.py", line 23, in clean
visitor.visit_fields(fields, fieldset_func=schema_cleanup, field_func=field_cleanup)
File "<redacted>/ecs/scripts/schema/visitor.py", line 21, in visit_fields
visit_fields(details['fields'],
File "<redacted>/ecs/scripts/schema/visitor.py", line 19, in visit_fields
field_func(details)
File "<redacted>/ecs/scripts/schema/cleaner.py", line 125, in field_cleanup
field_assertions_and_warnings(field)
File "<redacted>/ecs/scripts/schema/cleaner.py", line 168, in field_assertions_and_warnings
single_line_short_description(field)
File "<redacted>/ecs/scripts/schema/cleaner.py", line 189, in single_line_short_description
raise ValueError(msg)
ValueError: Short descriptions must be single line, and under 120 characters (current length: 134).
Offending field or field set: number
Short description:
Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet.
|
ValueError
|
def clean(fields, strict=False):
global strict_mode
strict_mode = strict
visitor.visit_fields(fields, fieldset_func=schema_cleanup, field_func=field_cleanup)
|
def clean(fields):
visitor.visit_fields(fields, fieldset_func=schema_cleanup, field_func=field_cleanup)
|
https://github.com/elastic/ecs/issues/892
|
$ python scripts/generator.py --ref v1.5.0
Loading schemas from git ref v1.5.0
Running generator. ECS version 1.5.0
Traceback (most recent call last):
File "scripts/generator.py", line 94, in <module>
main()
File "scripts/generator.py", line 44, in main
cleaner.clean(fields)
File "<redacted>/ecs/scripts/schema/cleaner.py", line 23, in clean
visitor.visit_fields(fields, fieldset_func=schema_cleanup, field_func=field_cleanup)
File "<redacted>/ecs/scripts/schema/visitor.py", line 21, in visit_fields
visit_fields(details['fields'],
File "<redacted>/ecs/scripts/schema/visitor.py", line 19, in visit_fields
field_func(details)
File "<redacted>/ecs/scripts/schema/cleaner.py", line 125, in field_cleanup
field_assertions_and_warnings(field)
File "<redacted>/ecs/scripts/schema/cleaner.py", line 168, in field_assertions_and_warnings
single_line_short_description(field)
File "<redacted>/ecs/scripts/schema/cleaner.py", line 189, in single_line_short_description
raise ValueError(msg)
ValueError: Short descriptions must be single line, and under 120 characters (current length: 134).
Offending field or field set: number
Short description:
Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet.
|
ValueError
|
def schema_cleanup(schema):
# Sanity check first
schema_mandatory_attributes(schema)
# trailing space cleanup
ecs_helpers.dict_clean_string_values(schema["schema_details"])
ecs_helpers.dict_clean_string_values(schema["field_details"])
# Some defaults
schema["schema_details"].setdefault("group", 2)
schema["schema_details"].setdefault("root", False)
schema["field_details"].setdefault("type", "group")
schema["field_details"].setdefault("short", schema["field_details"]["description"])
if "reusable" in schema["schema_details"]:
# order to perform chained reuses. Set to 1 if it needs to happen earlier.
schema["schema_details"]["reusable"].setdefault("order", 2)
# Precalculate stuff. Those can't be set in the YAML.
if schema["schema_details"]["root"]:
schema["schema_details"]["prefix"] = ""
else:
schema["schema_details"]["prefix"] = schema["field_details"]["name"] + "."
normalize_reuse_notation(schema)
# Final validity check if in strict mode
schema_assertions_and_warnings(schema)
|
def schema_cleanup(schema):
# Sanity check first
schema_mandatory_attributes(schema)
# trailing space cleanup
ecs_helpers.dict_clean_string_values(schema["schema_details"])
ecs_helpers.dict_clean_string_values(schema["field_details"])
# Some defaults
schema["schema_details"].setdefault("group", 2)
schema["schema_details"].setdefault("root", False)
schema["field_details"].setdefault("type", "group")
schema["field_details"].setdefault("short", schema["field_details"]["description"])
if "reusable" in schema["schema_details"]:
# order to perform chained reuses. Set to 1 if it needs to happen earlier.
schema["schema_details"]["reusable"].setdefault("order", 2)
# Precalculate stuff. Those can't be set in the YAML.
if schema["schema_details"]["root"]:
schema["schema_details"]["prefix"] = ""
else:
schema["schema_details"]["prefix"] = schema["field_details"]["name"] + "."
normalize_reuse_notation(schema)
# Final validity check
schema_assertions_and_warnings(schema)
|
https://github.com/elastic/ecs/issues/892
|
$ python scripts/generator.py --ref v1.5.0
Loading schemas from git ref v1.5.0
Running generator. ECS version 1.5.0
Traceback (most recent call last):
File "scripts/generator.py", line 94, in <module>
main()
File "scripts/generator.py", line 44, in main
cleaner.clean(fields)
File "<redacted>/ecs/scripts/schema/cleaner.py", line 23, in clean
visitor.visit_fields(fields, fieldset_func=schema_cleanup, field_func=field_cleanup)
File "<redacted>/ecs/scripts/schema/visitor.py", line 21, in visit_fields
visit_fields(details['fields'],
File "<redacted>/ecs/scripts/schema/visitor.py", line 19, in visit_fields
field_func(details)
File "<redacted>/ecs/scripts/schema/cleaner.py", line 125, in field_cleanup
field_assertions_and_warnings(field)
File "<redacted>/ecs/scripts/schema/cleaner.py", line 168, in field_assertions_and_warnings
single_line_short_description(field)
File "<redacted>/ecs/scripts/schema/cleaner.py", line 189, in single_line_short_description
raise ValueError(msg)
ValueError: Short descriptions must be single line, and under 120 characters (current length: 134).
Offending field or field set: number
Short description:
Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet.
|
ValueError
|
def schema_assertions_and_warnings(schema):
"""Additional checks on a fleshed out schema"""
single_line_short_description(schema, strict=strict_mode)
|
def schema_assertions_and_warnings(schema):
"""Additional checks on a fleshed out schema"""
single_line_short_description(schema)
|
https://github.com/elastic/ecs/issues/892
|
$ python scripts/generator.py --ref v1.5.0
Loading schemas from git ref v1.5.0
Running generator. ECS version 1.5.0
Traceback (most recent call last):
File "scripts/generator.py", line 94, in <module>
main()
File "scripts/generator.py", line 44, in main
cleaner.clean(fields)
File "<redacted>/ecs/scripts/schema/cleaner.py", line 23, in clean
visitor.visit_fields(fields, fieldset_func=schema_cleanup, field_func=field_cleanup)
File "<redacted>/ecs/scripts/schema/visitor.py", line 21, in visit_fields
visit_fields(details['fields'],
File "<redacted>/ecs/scripts/schema/visitor.py", line 19, in visit_fields
field_func(details)
File "<redacted>/ecs/scripts/schema/cleaner.py", line 125, in field_cleanup
field_assertions_and_warnings(field)
File "<redacted>/ecs/scripts/schema/cleaner.py", line 168, in field_assertions_and_warnings
single_line_short_description(field)
File "<redacted>/ecs/scripts/schema/cleaner.py", line 189, in single_line_short_description
raise ValueError(msg)
ValueError: Short descriptions must be single line, and under 120 characters (current length: 134).
Offending field or field set: number
Short description:
Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet.
|
ValueError
|
def field_assertions_and_warnings(field):
"""Additional checks on a fleshed out field"""
if not ecs_helpers.is_intermediate(field):
# check short description length if in strict mode
single_line_short_description(field, strict=strict_mode)
if field["field_details"]["level"] not in ACCEPTABLE_FIELD_LEVELS:
msg = "Invalid level for field '{}'.\nValue: {}\nAcceptable values: {}".format(
field["field_details"]["name"],
field["field_details"]["level"],
ACCEPTABLE_FIELD_LEVELS,
)
raise ValueError(msg)
|
def field_assertions_and_warnings(field):
"""Additional checks on a fleshed out field"""
if not ecs_helpers.is_intermediate(field):
single_line_short_description(field)
if field["field_details"]["level"] not in ACCEPTABLE_FIELD_LEVELS:
msg = "Invalid level for field '{}'.\nValue: {}\nAcceptable values: {}".format(
field["field_details"]["name"],
field["field_details"]["level"],
ACCEPTABLE_FIELD_LEVELS,
)
raise ValueError(msg)
|
https://github.com/elastic/ecs/issues/892
|
$ python scripts/generator.py --ref v1.5.0
Loading schemas from git ref v1.5.0
Running generator. ECS version 1.5.0
Traceback (most recent call last):
File "scripts/generator.py", line 94, in <module>
main()
File "scripts/generator.py", line 44, in main
cleaner.clean(fields)
File "<redacted>/ecs/scripts/schema/cleaner.py", line 23, in clean
visitor.visit_fields(fields, fieldset_func=schema_cleanup, field_func=field_cleanup)
File "<redacted>/ecs/scripts/schema/visitor.py", line 21, in visit_fields
visit_fields(details['fields'],
File "<redacted>/ecs/scripts/schema/visitor.py", line 19, in visit_fields
field_func(details)
File "<redacted>/ecs/scripts/schema/cleaner.py", line 125, in field_cleanup
field_assertions_and_warnings(field)
File "<redacted>/ecs/scripts/schema/cleaner.py", line 168, in field_assertions_and_warnings
single_line_short_description(field)
File "<redacted>/ecs/scripts/schema/cleaner.py", line 189, in single_line_short_description
raise ValueError(msg)
ValueError: Short descriptions must be single line, and under 120 characters (current length: 134).
Offending field or field set: number
Short description:
Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet.
|
ValueError
|
def single_line_short_description(schema_or_field, strict=True):
short_length = len(schema_or_field["field_details"]["short"])
if "\n" in schema_or_field["field_details"]["short"] or short_length > SHORT_LIMIT:
msg = "Short descriptions must be single line, and under {} characters (current length: {}).\n".format(
SHORT_LIMIT, short_length
)
msg += "Offending field or field set: {}\nShort description:\n {}".format(
schema_or_field["field_details"]["name"],
schema_or_field["field_details"]["short"],
)
if strict:
raise ValueError(msg)
else:
ecs_helpers.strict_warning(msg)
|
def single_line_short_description(schema_or_field):
short_length = len(schema_or_field["field_details"]["short"])
if "\n" in schema_or_field["field_details"]["short"] or short_length > SHORT_LIMIT:
msg = "Short descriptions must be single line, and under {} characters (current length: {}).\n".format(
SHORT_LIMIT, short_length
)
msg += "Offending field or field set: {}\nShort description:\n {}".format(
schema_or_field["field_details"]["name"],
schema_or_field["field_details"]["short"],
)
raise ValueError(msg)
|
https://github.com/elastic/ecs/issues/892
|
$ python scripts/generator.py --ref v1.5.0
Loading schemas from git ref v1.5.0
Running generator. ECS version 1.5.0
Traceback (most recent call last):
File "scripts/generator.py", line 94, in <module>
main()
File "scripts/generator.py", line 44, in main
cleaner.clean(fields)
File "<redacted>/ecs/scripts/schema/cleaner.py", line 23, in clean
visitor.visit_fields(fields, fieldset_func=schema_cleanup, field_func=field_cleanup)
File "<redacted>/ecs/scripts/schema/visitor.py", line 21, in visit_fields
visit_fields(details['fields'],
File "<redacted>/ecs/scripts/schema/visitor.py", line 19, in visit_fields
field_func(details)
File "<redacted>/ecs/scripts/schema/cleaner.py", line 125, in field_cleanup
field_assertions_and_warnings(field)
File "<redacted>/ecs/scripts/schema/cleaner.py", line 168, in field_assertions_and_warnings
single_line_short_description(field)
File "<redacted>/ecs/scripts/schema/cleaner.py", line 189, in single_line_short_description
raise ValueError(msg)
ValueError: Short descriptions must be single line, and under 120 characters (current length: 134).
Offending field or field set: number
Short description:
Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet.
|
ValueError
|
def __repr__(self):
info = {}
if self._model_meta is not None:
if hasattr(self._model_meta, "run_id") and self._model_meta.run_id is not None:
info["run_id"] = self._model_meta.run_id
if (
hasattr(self._model_meta, "artifact_path")
and self._model_meta.artifact_path is not None
):
info["artifact_path"] = self._model_meta.artifact_path
info["flavor"] = self._model_meta.flavors[FLAVOR_NAME]["loader_module"]
return yaml.safe_dump(
{"mlflow.pyfunc.loaded_model": info}, default_flow_style=False
)
|
def __repr__(self):
info = {}
if self._model_meta is not None:
if self._model_meta.run_id is not None:
info["run_id"] = self._model_meta.run_id
if self._model_meta.artifact_path is not None:
info["artifact_path"] = self._model_meta.artifact_path
info["flavor"] = self._model_meta.flavors[FLAVOR_NAME]["loader_module"]
return yaml.safe_dump(
{"mlflow.pyfunc.loaded_model": info}, default_flow_style=False
)
|
https://github.com/mlflow/mlflow/issues/3550
|
from sklearn import datasets
from sklearn.model_selection import train_test_split
import mlflow
import xgboost as xgb
iris = datasets.load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test, label=y_test)
params = {
"objective": "multi:softprob",
"num_class": 3,
"learning_rate": 0.3,
"eval_metric": "mlogloss",
"seed": 42
}
model = xgb.train(params, dtrain, evals=[(dtrain, "train")])
[0] train-mlogloss:0.74723
[1] train-mlogloss:0.54060
[2] train-mlogloss:0.40276
[3] train-mlogloss:0.30789
[4] train-mlogloss:0.24052
[5] train-mlogloss:0.19087
[6] train-mlogloss:0.15471
[7] train-mlogloss:0.12807
[8] train-mlogloss:0.10722
[9] train-mlogloss:0.09053
mlflow.xgboost.save_model(model, 'model')
model2 = mlflow.xgboost.load_model('model')
model2
<xgboost.core.Booster object at 0x7f9e1798d0f0>
model3 = mlflow.pyfunc.load_model('model')
model3
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/lorenz/opt/miniconda3/envs/mlflow-1.11.0/lib/python3.6/site-packages/mlflow/pyfunc/__init__.py", line 437, in __repr__
if self._model_meta.run_id is not None:
AttributeError: 'Model' object has no attribute 'run_id'
|
AttributeError
|
def _parse_search_registered_models_order_by(cls, order_by_list):
"""Sorts a set of registered models based on their natural ordering and an overriding set
of order_bys. Registered models are naturally ordered first by name ascending.
"""
clauses = []
observed_order_by_clauses = set()
if order_by_list:
for order_by_clause in order_by_list:
(
attribute_token,
ascending,
) = SearchUtils.parse_order_by_for_search_registered_models(order_by_clause)
if attribute_token == SqlRegisteredModel.name.key:
field = SqlRegisteredModel.name
elif attribute_token in SearchUtils.VALID_TIMESTAMP_ORDER_BY_KEYS:
field = SqlRegisteredModel.last_updated_time
else:
raise MlflowException(
"Invalid order by key '{}' specified.".format(attribute_token)
+ "Valid keys are "
+ "'{}'".format(
SearchUtils.RECOMMENDED_ORDER_BY_KEYS_REGISTERED_MODELS
),
error_code=INVALID_PARAMETER_VALUE,
)
if field.key in observed_order_by_clauses:
raise MlflowException(
"`order_by` contains duplicate fields: {}".format(order_by_list)
)
observed_order_by_clauses.add(field.key)
if ascending:
clauses.append(field.asc())
else:
clauses.append(field.desc())
if SqlRegisteredModel.name.key not in observed_order_by_clauses:
clauses.append(SqlRegisteredModel.name.asc())
return clauses
|
def _parse_search_registered_models_order_by(cls, order_by_list):
"""Sorts a set of registered models based on their natural ordering and an overriding set
of order_bys. Registered models are naturally ordered first by name ascending.
"""
clauses = []
if order_by_list:
for order_by_clause in order_by_list:
(
attribute_token,
ascending,
) = SearchUtils.parse_order_by_for_search_registered_models(order_by_clause)
if attribute_token == SqlRegisteredModel.name.key:
field = SqlRegisteredModel.name
elif attribute_token in SearchUtils.VALID_TIMESTAMP_ORDER_BY_KEYS:
field = SqlRegisteredModel.last_updated_time
else:
raise MlflowException(
"Invalid order by key '{}' specified.".format(attribute_token)
+ "Valid keys are "
+ "'{}'".format(
SearchUtils.RECOMMENDED_ORDER_BY_KEYS_REGISTERED_MODELS
),
error_code=INVALID_PARAMETER_VALUE,
)
if ascending:
clauses.append(field.asc())
else:
clauses.append(field.desc())
clauses.append(SqlRegisteredModel.name.asc())
return clauses
|
https://github.com/mlflow/mlflow/issues/3217
|
Traceback (most recent call last):
File "/opt/app-root/lib64/python3.8/site-packages/mlflow/store/db/utils.py", line 76, in make_managed_session
yield session
File "/opt/app-root/lib64/python3.8/site-packages/mlflow/store/model_registry/sqlalchemy_store.py", line 331, in search_registered_models
sql_registered_models = query.all()
File "/opt/app-root/lib64/python3.8/site-packages/sqlalchemy/orm/query.py", line 3233, in all
return list(self)
File "/opt/app-root/lib64/python3.8/site-packages/sqlalchemy/orm/query.py", line 3389, in __iter__
return self._execute_and_instances(context)
File "/opt/app-root/lib64/python3.8/site-packages/sqlalchemy/orm/query.py", line 3414, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/opt/app-root/lib64/python3.8/site-packages/sqlalchemy/engine/base.py", line 982, in execute
return meth(self, multiparams, params)
File "/opt/app-root/lib64/python3.8/site-packages/sqlalchemy/sql/elements.py", line 293, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/opt/app-root/lib64/python3.8/site-packages/sqlalchemy/engine/base.py", line 1095, in _execute_clauseelement
ret = self._execute_context(
File "/opt/app-root/lib64/python3.8/site-packages/sqlalchemy/engine/base.py", line 1249, in _execute_context
self._handle_dbapi_exception(
File "/opt/app-root/lib64/python3.8/site-packages/sqlalchemy/engine/base.py", line 1476, in _handle_dbapi_exception
util.raise_from_cause(sqlalchemy_exception, exc_info)
File "/opt/app-root/lib64/python3.8/site-packages/sqlalchemy/util/compat.py", line 398, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/opt/app-root/lib64/python3.8/site-packages/sqlalchemy/util/compat.py", line 152, in reraise
raise value.with_traceback(tb)
File "/opt/app-root/lib64/python3.8/site-packages/sqlalchemy/engine/base.py", line 1245, in _execute_context
self.dialect.do_execute(
File "/opt/app-root/lib64/python3.8/site-packages/sqlalchemy/engine/default.py", line 588, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.ProgrammingError: (pyodbc.ProgrammingError) ('42000', '[42000] [Microsoft][ODBC Driver 17 for SQL Server][SQL Server]A column has been specified more than once in the order by list. Columns in the order by list must be unique. (169) (SQLExecDirectW)')
[SQL: SELECT TOP 11 registered_models.name AS registered_models_name, registered_models.creation_time AS registered_models_creation_time, registered_models.last_updated_time AS registered_models_last_updated_time, registered_models.description AS registered_models_description
FROM registered_models
WHERE lower(registered_models.name) LIKE lower(?) ORDER BY registered_models.name ASC, registered_models.name ASC]
[parameters: ('%%',)]
(Background on this error at: http://sqlalche.me/e/f405)
|
sqlalchemy.exc.ProgrammingError
|
def _get_orderby_clauses(order_by_list, session):
"""Sorts a set of runs based on their natural ordering and an overriding set of order_bys.
Runs are naturally ordered first by start time descending, then by run id for tie-breaking.
"""
clauses = []
ordering_joins = []
clause_id = 0
observed_order_by_clauses = set()
# contrary to filters, it is not easily feasible to separately handle sorting
# on attributes and on joined tables as we must keep all clauses in the same order
if order_by_list:
for order_by_clause in order_by_list:
clause_id += 1
(key_type, key, ascending) = SearchUtils.parse_order_by_for_search_runs(
order_by_clause
)
if SearchUtils.is_attribute(key_type, "="):
order_value = getattr(SqlRun, SqlRun.get_attribute_name(key))
else:
if SearchUtils.is_metric(key_type, "="): # any valid comparator
entity = SqlLatestMetric
elif SearchUtils.is_tag(key_type, "="):
entity = SqlTag
elif SearchUtils.is_param(key_type, "="):
entity = SqlParam
else:
raise MlflowException(
"Invalid identifier type '%s'" % key_type,
error_code=INVALID_PARAMETER_VALUE,
)
# build a subquery first because we will join it in the main request so that the
# metric we want to sort on is available when we apply the sorting clause
subquery = session.query(entity).filter(entity.key == key).subquery()
ordering_joins.append(subquery)
order_value = subquery.c.value
# sqlite does not support NULLS LAST expression, so we sort first by
# presence of the field (and is_nan for metrics), then by actual value
# As the subqueries are created independently and used later in the
# same main query, the CASE WHEN columns need to have unique names to
# avoid ambiguity
if SearchUtils.is_metric(key_type, "="):
clauses.append(
sql.case(
[(subquery.c.is_nan.is_(True), 1), (order_value.is_(None), 1)],
else_=0,
).label("clause_%s" % clause_id)
)
else: # other entities do not have an 'is_nan' field
clauses.append(
sql.case([(order_value.is_(None), 1)], else_=0).label(
"clause_%s" % clause_id
)
)
if (key_type, key) in observed_order_by_clauses:
raise MlflowException(
"`order_by` contains duplicate fields: {}".format(order_by_list)
)
observed_order_by_clauses.add((key_type, key))
if ascending:
clauses.append(order_value)
else:
clauses.append(order_value.desc())
if (
SearchUtils._ATTRIBUTE_IDENTIFIER,
SqlRun.start_time.key,
) not in observed_order_by_clauses:
clauses.append(SqlRun.start_time.desc())
clauses.append(SqlRun.run_uuid)
return clauses, ordering_joins
|
def _get_orderby_clauses(order_by_list, session):
"""Sorts a set of runs based on their natural ordering and an overriding set of order_bys.
Runs are naturally ordered first by start time descending, then by run id for tie-breaking.
"""
clauses = []
ordering_joins = []
clause_id = 0
# contrary to filters, it is not easily feasible to separately handle sorting
# on attributes and on joined tables as we must keep all clauses in the same order
if order_by_list:
for order_by_clause in order_by_list:
clause_id += 1
(key_type, key, ascending) = SearchUtils.parse_order_by_for_search_runs(
order_by_clause
)
if SearchUtils.is_attribute(key_type, "="):
order_value = getattr(SqlRun, SqlRun.get_attribute_name(key))
else:
if SearchUtils.is_metric(key_type, "="): # any valid comparator
entity = SqlLatestMetric
elif SearchUtils.is_tag(key_type, "="):
entity = SqlTag
elif SearchUtils.is_param(key_type, "="):
entity = SqlParam
else:
raise MlflowException(
"Invalid identifier type '%s'" % key_type,
error_code=INVALID_PARAMETER_VALUE,
)
# build a subquery first because we will join it in the main request so that the
# metric we want to sort on is available when we apply the sorting clause
subquery = session.query(entity).filter(entity.key == key).subquery()
ordering_joins.append(subquery)
order_value = subquery.c.value
# sqlite does not support NULLS LAST expression, so we sort first by
# presence of the field (and is_nan for metrics), then by actual value
# As the subqueries are created independently and used later in the
# same main query, the CASE WHEN columns need to have unique names to
# avoid ambiguity
if SearchUtils.is_metric(key_type, "="):
clauses.append(
sql.case(
[(subquery.c.is_nan.is_(True), 1), (order_value.is_(None), 1)],
else_=0,
).label("clause_%s" % clause_id)
)
else: # other entities do not have an 'is_nan' field
clauses.append(
sql.case([(order_value.is_(None), 1)], else_=0).label(
"clause_%s" % clause_id
)
)
if ascending:
clauses.append(order_value)
else:
clauses.append(order_value.desc())
clauses.append(SqlRun.start_time.desc())
clauses.append(SqlRun.run_uuid)
return clauses, ordering_joins
|
https://github.com/mlflow/mlflow/issues/3217
|
Traceback (most recent call last):
File "/opt/app-root/lib64/python3.8/site-packages/mlflow/store/db/utils.py", line 76, in make_managed_session
yield session
File "/opt/app-root/lib64/python3.8/site-packages/mlflow/store/model_registry/sqlalchemy_store.py", line 331, in search_registered_models
sql_registered_models = query.all()
File "/opt/app-root/lib64/python3.8/site-packages/sqlalchemy/orm/query.py", line 3233, in all
return list(self)
File "/opt/app-root/lib64/python3.8/site-packages/sqlalchemy/orm/query.py", line 3389, in __iter__
return self._execute_and_instances(context)
File "/opt/app-root/lib64/python3.8/site-packages/sqlalchemy/orm/query.py", line 3414, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/opt/app-root/lib64/python3.8/site-packages/sqlalchemy/engine/base.py", line 982, in execute
return meth(self, multiparams, params)
File "/opt/app-root/lib64/python3.8/site-packages/sqlalchemy/sql/elements.py", line 293, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/opt/app-root/lib64/python3.8/site-packages/sqlalchemy/engine/base.py", line 1095, in _execute_clauseelement
ret = self._execute_context(
File "/opt/app-root/lib64/python3.8/site-packages/sqlalchemy/engine/base.py", line 1249, in _execute_context
self._handle_dbapi_exception(
File "/opt/app-root/lib64/python3.8/site-packages/sqlalchemy/engine/base.py", line 1476, in _handle_dbapi_exception
util.raise_from_cause(sqlalchemy_exception, exc_info)
File "/opt/app-root/lib64/python3.8/site-packages/sqlalchemy/util/compat.py", line 398, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/opt/app-root/lib64/python3.8/site-packages/sqlalchemy/util/compat.py", line 152, in reraise
raise value.with_traceback(tb)
File "/opt/app-root/lib64/python3.8/site-packages/sqlalchemy/engine/base.py", line 1245, in _execute_context
self.dialect.do_execute(
File "/opt/app-root/lib64/python3.8/site-packages/sqlalchemy/engine/default.py", line 588, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.ProgrammingError: (pyodbc.ProgrammingError) ('42000', '[42000] [Microsoft][ODBC Driver 17 for SQL Server][SQL Server]A column has been specified more than once in the order by list. Columns in the order by list must be unique. (169) (SQLExecDirectW)')
[SQL: SELECT TOP 11 registered_models.name AS registered_models_name, registered_models.creation_time AS registered_models_creation_time, registered_models.last_updated_time AS registered_models_last_updated_time, registered_models.description AS registered_models_description
FROM registered_models
WHERE lower(registered_models.name) LIKE lower(?) ORDER BY registered_models.name ASC, registered_models.name ASC]
[parameters: ('%%',)]
(Background on this error at: http://sqlalche.me/e/f405)
|
sqlalchemy.exc.ProgrammingError
|
def list_artifacts(self, path=None):
with self.get_ftp_client() as ftp:
artifact_dir = self.path
list_dir = posixpath.join(artifact_dir, path) if path else artifact_dir
if not self._is_dir(ftp, list_dir):
return []
artifact_files = ftp.nlst(list_dir)
artifact_files = list(filter(lambda x: x != "." and x != "..", artifact_files))
# Make sure artifact_files is a list of file names because ftp.nlst
# may return absolute paths.
artifact_files = [os.path.basename(f) for f in artifact_files]
infos = []
for file_name in artifact_files:
file_path = file_name if path is None else posixpath.join(path, file_name)
full_file_path = posixpath.join(list_dir, file_name)
if self._is_dir(ftp, full_file_path):
infos.append(FileInfo(file_path, True, None))
else:
size = self._size(ftp, full_file_path)
infos.append(FileInfo(file_path, False, size))
return infos
|
def list_artifacts(self, path=None):
with self.get_ftp_client() as ftp:
artifact_dir = self.path
list_dir = posixpath.join(artifact_dir, path) if path else artifact_dir
if not self._is_dir(ftp, list_dir):
return []
artifact_files = ftp.nlst(list_dir)
artifact_files = list(filter(lambda x: x != "." and x != "..", artifact_files))
infos = []
for file_name in artifact_files:
file_path = file_name if path is None else posixpath.join(path, file_name)
full_file_path = posixpath.join(list_dir, file_name)
if self._is_dir(ftp, full_file_path):
infos.append(FileInfo(file_path, True, None))
else:
size = self._size(ftp, full_file_path)
infos.append(FileInfo(file_path, False, size))
return infos
|
https://github.com/mlflow/mlflow/issues/3197
|
Traceback (most recent call last):
File "/Users/shaneing/PycharmProjects/gswatch/examples/load_model_from_ftp.py", line 9, in <module>
model = torch_load_model('ftp://mlflow:mlflow@10.10.10.1/mlruns/8/6f11ca34312a44df88f6b86f2f224f66/artifacts/fake-model/model')
└ <function load_model at 0x12e375710>
File "/usr/local/anaconda3/envs/gswatch3.7/lib/python3.7/site-packages/mlflow/pytorch/__init__.py", line 407, in load_model
pytorch_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)
│ │ └ 'pytorch'
│ └ '/var/folders/px/ncc8244n0sj6zsh5s6k8v7g40000gn/T/tmpo81ye7y5/model'
└ <function _get_flavor_configuration at 0x12e36d7a0>
File "/usr/local/anaconda3/envs/gswatch3.7/lib/python3.7/site-packages/mlflow/utils/model_utils.py", line 26, in _get_flavor_configuration
RESOURCE_DOES_NOT_EXIST)
mlflow.exceptions.MlflowException: Could not find an "MLmodel" configuration file at "/var/folders/px/ncc8244n0sj6zsh5s6k8v7g40000gn/T/tmpo81ye7y5/model"
|
mlflow.exceptions.MlflowException
|
def log_artifacts(self, local_dir, artifact_path=None):
dest_path = posixpath.join(self.path, artifact_path) if artifact_path else self.path
local_dir = os.path.abspath(local_dir)
for root, _, filenames in os.walk(local_dir):
upload_path = dest_path
if root != local_dir:
rel_path = os.path.relpath(root, local_dir)
upload_path = relative_path_to_artifact_path(rel_path)
if not filenames:
with self.get_ftp_client() as ftp:
self._mkdir(ftp, posixpath.join(self.path, upload_path))
for f in filenames:
if os.path.isfile(os.path.join(root, f)):
self.log_artifact(os.path.join(root, f), upload_path)
|
def log_artifacts(self, local_dir, artifact_path=None):
dest_path = posixpath.join(self.path, artifact_path) if artifact_path else self.path
dest_path = posixpath.join(dest_path, os.path.split(local_dir)[1])
dest_path_re = os.path.split(local_dir)[1]
if artifact_path:
dest_path_re = posixpath.join(artifact_path, os.path.split(local_dir)[1])
local_dir = os.path.abspath(local_dir)
for root, _, filenames in os.walk(local_dir):
upload_path = dest_path
if root != local_dir:
rel_path = os.path.relpath(root, local_dir)
rel_path = relative_path_to_artifact_path(rel_path)
upload_path = posixpath.join(dest_path_re, rel_path)
if not filenames:
with self.get_ftp_client() as ftp:
self._mkdir(ftp, posixpath.join(self.path, upload_path))
for f in filenames:
if os.path.isfile(os.path.join(root, f)):
self.log_artifact(os.path.join(root, f), upload_path)
|
https://github.com/mlflow/mlflow/issues/2641
|
load_model("models:/MyKerasModel/3")
Traceback (most recent call last):
File "mini.py", line 6, in <module>
load_model("models:/MyKerasModel/3")
File "/Users/daniel.dercks/opt/anaconda3/envs/berner_macos_nogpu/lib/python3.7/site-packages/mlflow/keras.py", line 391, in load_model
flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)
File "/Users/daniel.dercks/opt/anaconda3/envs/berner_macos_nogpu/lib/python3.7/site-packages/mlflow/utils/model_utils.py", line 26, in _get_flavor_configuration
RESOURCE_DOES_NOT_EXIST)
mlflow.exceptions.MlflowException: Could not find an "MLmodel" configuration file at "/var/folders/jx/02yypg8x5wjdhhr54xcmzhy87m0bvt/T/tmpmo0g0x22/"
|
mlflow.exceptions.MlflowException
|
def _enforce_type(name, values: pandas.Series, t: DataType):
"""
Enforce the input column type matches the declared in model input schema.
The following type conversions are allowed:
1. np.object -> string
2. int -> long (upcast)
3. float -> double (upcast)
Any other type mismatch will raise error.
"""
if values.dtype == np.object and t not in (DataType.binary, DataType.string):
values = values.infer_objects()
if t == DataType.string and values.dtype == np.object:
# NB: strings are by default parsed and inferred as objects, but it is
# recommended to use StringDtype extension type if available. See
#
# `https://pandas.pydata.org/pandas-docs/stable/user_guide/text.html`
#
# for more detail.
try:
return values.astype(t.to_pandas(), errors="raise")
except ValueError:
raise MlflowException(
"Failed to convert column {0} from type {1} to {2}.".format(
name, values.dtype, t
)
)
if values.dtype in (t.to_pandas(), t.to_numpy()):
# The types are already compatible => conversion is not necessary.
return values
if t == DataType.binary and values.dtype.kind == t.binary.to_numpy().kind:
# NB: bytes in numpy have variable itemsize depending on the length of the longest
# element in the array (column). Since MLflow binary type is length agnostic, we ignore
# itemsize when matching binary columns.
return values
numpy_type = t.to_numpy()
is_compatible_type = values.dtype.kind == numpy_type.kind
is_upcast = values.dtype.itemsize <= numpy_type.itemsize
if is_compatible_type and is_upcast:
return values.astype(numpy_type, errors="raise")
else:
# NB: conversion between incompatible types (e.g. floats -> ints or
# double -> float) are not allowed. While supported by pandas and numpy,
# these conversions alter the values significantly.
raise MlflowException(
"Incompatible input types for column {0}. "
"Can not safely convert {1} to {2}.".format(name, values.dtype, numpy_type)
)
|
def _enforce_type(name, values: pandas.Series, t: DataType):
"""
Enforce the input column type matches the declared in model input schema.
The following type conversions are allowed:
1. np.object -> string
2. int -> long (upcast)
3. float -> double (upcast)
Any other type mismatch will raise error.
"""
if values.dtype == np.object and t not in (DataType.binary, DataType.string):
values = values.infer_objects()
if values.dtype in (t.to_pandas(), t.to_numpy()):
# The types are already compatible => conversion is not necessary.
return values
if t == DataType.binary and values.dtype.kind == t.binary.to_numpy().kind:
# NB: bytes in numpy have variable itemsize depending on the length of the longest
# element in the array (column). Since MLflow binary type is length agnostic, we ignore
# itemsize when matching binary columns.
return values
if t == DataType.string and values.dtype == np.object:
# NB: strings are by default parsed and inferred as objects, but it is
# recommended to use StringDtype extension type if available. See
#
# `https://pandas.pydata.org/pandas-docs/stable/user_guide/text.html`
#
# for more detail.
try:
return values.astype(t.to_pandas(), errors="raise")
except ValueError:
raise MlflowException(
"Failed to convert column {0} from type {1} to {2}.".format(
name, values.dtype, t
)
)
numpy_type = t.to_numpy()
is_compatible_type = values.dtype.kind == numpy_type.kind
is_upcast = values.dtype.itemsize <= numpy_type.itemsize
if is_compatible_type and is_upcast:
return values.astype(numpy_type, errors="raise")
else:
# NB: conversion between incompatible types (e.g. floats -> ints or
# double -> float) are not allowed. While supported by pandas and numpy,
# these conversions alter the values significantly.
raise MlflowException(
"Incompatible input types for column {0}. "
"Can not safely convert {1} to {2}.".format(name, values.dtype, numpy_type)
)
|
https://github.com/mlflow/mlflow/issues/3006
|
mlflow version: 1.9.1.dev0
pandas version: 1.0.0
Traceback (most recent call last):
File "test.py", line 18, in <module>
_enforce_schema(df, schema)
File "/Users/xxx/Desktop/projects/mlflow/mlflow/pyfunc/__init__.py", line 372, in _enforce_schema
new_pdf[x] = _enforce_type(x, pdf[x], col_types[i])
File "/Users/xxx/Desktop/projects/mlflow/mlflow/pyfunc/__init__.py", line 288, in _enforce_type
if values.dtype in (t.to_pandas(), t.to_numpy()):
TypeError: data type not understood
|
TypeError
|
def _get_rest_store(store_uri, **_):
return RestStore(partial(_get_default_host_creds, store_uri))
|
def _get_rest_store(store_uri, **_):
def get_default_host_creds():
return rest_utils.MlflowHostCreds(
host=store_uri,
username=os.environ.get(_TRACKING_USERNAME_ENV_VAR),
password=os.environ.get(_TRACKING_PASSWORD_ENV_VAR),
token=os.environ.get(_TRACKING_TOKEN_ENV_VAR),
ignore_tls_verification=os.environ.get(_TRACKING_INSECURE_TLS_ENV_VAR)
== "true",
client_cert_path=os.environ.get(_TRACKING_CLIENT_CERT_PATH_ENV_VAR),
server_cert_path=os.environ.get(_TRACKING_SERVER_CERT_PATH_ENV_VAR),
)
return RestStore(get_default_host_creds)
|
https://github.com/mlflow/mlflow/issues/2954
|
python mlflow_pickle_error.py
Traceback (most recent call last):
File "mlflow_test.py", line 14, in <module>
pickle.dump(cls, open('test_dump.pkl', 'wb'))
AttributeError: Can't pickle local object '_get_rest_store.<locals>.get_default_host_creds'
|
AttributeError
|
def _call_endpoint(self, service, api, json_body):
db_profile = get_db_profile_from_uri(mlflow.tracking.get_tracking_uri())
db_creds = get_databricks_host_creds(db_profile)
endpoint, method = _SERVICE_AND_METHOD_TO_INFO[service][api]
response_proto = api.Response()
return call_endpoint(db_creds, endpoint, method, json_body, response_proto)
|
def _call_endpoint(self, service, api, json_body):
endpoint, method = _SERVICE_AND_METHOD_TO_INFO[service][api]
response_proto = api.Response()
return call_endpoint(
get_databricks_host_creds(), endpoint, method, json_body, response_proto
)
|
https://github.com/mlflow/mlflow/issues/2954
|
python mlflow_pickle_error.py
Traceback (most recent call last):
File "mlflow_test.py", line 14, in <module>
pickle.dump(cls, open('test_dump.pkl', 'wb'))
AttributeError: Can't pickle local object '_get_rest_store.<locals>.get_default_host_creds'
|
AttributeError
|
def list_artifacts(self, path=None):
if path:
run_relative_path = posixpath.join(
self.run_relative_artifact_repo_root_path, path
)
else:
run_relative_path = self.run_relative_artifact_repo_root_path
infos = []
page_token = None
while True:
if page_token:
json_body = message_to_json(
ListArtifacts(
run_id=self.run_id, path=run_relative_path, page_token=page_token
)
)
else:
json_body = message_to_json(
ListArtifacts(run_id=self.run_id, path=run_relative_path)
)
response = self._call_endpoint(MlflowService, ListArtifacts, json_body)
artifact_list = response.files
# If `path` is a file, ListArtifacts returns a single list element with the
# same name as `path`. The list_artifacts API expects us to return an empty list in this
# case, so we do so here.
if (
len(artifact_list) == 1
and artifact_list[0].path == run_relative_path
and not artifact_list[0].is_dir
):
return []
for output_file in artifact_list:
file_rel_path = posixpath.relpath(
path=output_file.path, start=self.run_relative_artifact_repo_root_path
)
artifact_size = None if output_file.is_dir else output_file.file_size
infos.append(FileInfo(file_rel_path, output_file.is_dir, artifact_size))
if len(artifact_list) == 0 or not response.next_page_token:
break
page_token = response.next_page_token
return infos
|
def list_artifacts(self, path=None):
if path:
run_relative_path = posixpath.join(
self.run_relative_artifact_repo_root_path, path
)
else:
run_relative_path = self.run_relative_artifact_repo_root_path
infos = []
page_token = None
while True:
if page_token:
json_body = message_to_json(
ListArtifacts(
run_id=self.run_id, path=run_relative_path, page_token=page_token
)
)
else:
json_body = message_to_json(
ListArtifacts(run_id=self.run_id, path=run_relative_path)
)
response = self._call_endpoint(MlflowService, ListArtifacts, json_body)
artifact_list = response.files
# If `path` is a file, ListArtifacts returns a single list element with the
# same name as `path`. The list_artifacts API expects us to return an empty list in this
# case, so we do so here.
if (
len(artifact_list) == 1
and artifact_list[0].path == path
and not artifact_list[0].is_dir
):
return []
for output_file in artifact_list:
file_rel_path = posixpath.relpath(
path=output_file.path, start=self.run_relative_artifact_repo_root_path
)
artifact_size = None if output_file.is_dir else output_file.file_size
infos.append(FileInfo(file_rel_path, output_file.is_dir, artifact_size))
if len(artifact_list) == 0 or not response.next_page_token:
break
page_token = response.next_page_token
return infos
|
https://github.com/mlflow/mlflow/issues/2954
|
python mlflow_pickle_error.py
Traceback (most recent call last):
File "mlflow_test.py", line 14, in <module>
pickle.dump(cls, open('test_dump.pkl', 'wb'))
AttributeError: Can't pickle local object '_get_rest_store.<locals>.get_default_host_creds'
|
AttributeError
|
def to_pandas(self) -> np.dtype:
"""Get equivalent pandas data type."""
return self._pandas_type
|
def to_pandas(self) -> Union[np.dtype, PandasExtensionDtype]:
"""Get equivalent pandas data type."""
return self._pandas_type
|
https://github.com/mlflow/mlflow/issues/2954
|
python mlflow_pickle_error.py
Traceback (most recent call last):
File "mlflow_test.py", line 14, in <module>
pickle.dump(cls, open('test_dump.pkl', 'wb'))
AttributeError: Can't pickle local object '_get_rest_store.<locals>.get_default_host_creds'
|
AttributeError
|
def _save_model_with_loader_module_and_data_path(
path,
loader_module,
data_path=None,
code_paths=None,
conda_env=None,
mlflow_model=Model(),
):
"""
Export model as a generic Python function model.
:param path: The path to which to save the Python model.
:param loader_module: The name of the Python module that is used to load the model
from ``data_path``. This module must define a method with the prototype
``_load_pyfunc(data_path)``.
:param data_path: Path to a file or directory containing model data.
:param code_paths: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path before the model is loaded.
:param conda_env: Either a dictionary representation of a Conda environment or the path to a
Conda environment yaml file. If provided, this decsribes the environment
this model should be run in.
:return: Model configuration containing model info.
"""
if os.path.exists(path):
raise MlflowException(
message="Path '{}' already exists".format(path),
error_code=RESOURCE_ALREADY_EXISTS,
)
os.makedirs(path)
code = None
data = None
if data_path is not None:
model_file = _copy_file_or_tree(src=data_path, dst=path, dst_dir="data")
data = model_file
if code_paths is not None:
for code_path in code_paths:
_copy_file_or_tree(src=code_path, dst=path, dst_dir="code")
code = "code"
conda_env_subpath = "mlflow_env.yml"
if conda_env is None:
conda_env = get_default_conda_env()
elif not isinstance(conda_env, dict):
with open(conda_env, "r") as f:
conda_env = yaml.safe_load(f)
with open(os.path.join(path, conda_env_subpath), "w") as f:
yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
mlflow.pyfunc.add_to_model(
mlflow_model,
loader_module=loader_module,
code=code,
data=data,
env=conda_env_subpath,
)
mlflow_model.save(os.path.join(path, "MLmodel"))
return mlflow_model
|
def _save_model_with_loader_module_and_data_path(
path,
loader_module,
data_path=None,
code_paths=None,
conda_env=None,
mlflow_model=Model(),
):
"""
Export model as a generic Python function model.
:param path: The path to which to save the Python model.
:param loader_module: The name of the Python module that is used to load the model
from ``data_path``. This module must define a method with the prototype
``_load_pyfunc(data_path)``.
:param data_path: Path to a file or directory containing model data.
:param code_paths: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path before the model is loaded.
:param conda_env: Either a dictionary representation of a Conda environment or the path to a
Conda environment yaml file. If provided, this decsribes the environment
this model should be run in.
:return: Model configuration containing model info.
"""
if os.path.exists(path):
raise MlflowException(
message="Path '{}' already exists".format(path),
error_code=RESOURCE_ALREADY_EXISTS,
)
os.makedirs(path)
code = None
data = None
env = None
if data_path is not None:
model_file = _copy_file_or_tree(src=data_path, dst=path, dst_dir="data")
data = model_file
if code_paths is not None:
for code_path in code_paths:
_copy_file_or_tree(src=code_path, dst=path, dst_dir="code")
code = "code"
if conda_env is not None:
shutil.copy(src=conda_env, dst=os.path.join(path, "mlflow_env.yml"))
env = "mlflow_env.yml"
mlflow.pyfunc.add_to_model(
mlflow_model, loader_module=loader_module, code=code, data=data, env=env
)
mlflow_model.save(os.path.join(path, "MLmodel"))
return mlflow_model
|
https://github.com/mlflow/mlflow/issues/2580
|
import mlflow.pyfunc
mlflow.pyfunc.log_model('x', 'x', conda_env={})
Traceback (most recent call last):
...
TypeError: stat: path should be string, bytes, os.PathLike or integer, not dict
|
TypeError
|
def _create_default_experiment(self, session):
"""
MLflow UI and client code expects a default experiment with ID 0.
This method uses SQL insert statement to create the default experiment as a hack, since
experiment table uses 'experiment_id' column is a PK and is also set to auto increment.
MySQL and other implementation do not allow value '0' for such cases.
ToDo: Identify a less hacky mechanism to create default experiment 0
"""
table = SqlExperiment.__tablename__
default_experiment = {
SqlExperiment.experiment_id.name: int(SqlAlchemyStore.DEFAULT_EXPERIMENT_ID),
SqlExperiment.name.name: Experiment.DEFAULT_EXPERIMENT_NAME,
SqlExperiment.artifact_location.name: str(self._get_artifact_location(0)),
SqlExperiment.lifecycle_stage.name: LifecycleStage.ACTIVE,
}
def decorate(s):
if isinstance(s, str):
return "'{}'".format(s)
else:
return "{}".format(s)
# Get a list of keys to ensure we have a deterministic ordering
columns = list(default_experiment.keys())
values = ", ".join([decorate(default_experiment.get(c)) for c in columns])
try:
self._set_zero_value_insertion_for_autoincrement_column(session)
session.execute(
"INSERT INTO {} ({}) VALUES ({});".format(table, ", ".join(columns), values)
)
finally:
self._unset_zero_value_insertion_for_autoincrement_column(session)
|
def _create_default_experiment(self, session):
"""
MLflow UI and client code expects a default experiment with ID 0.
This method uses SQL insert statement to create the default experiment as a hack, since
experiment table uses 'experiment_id' column is a PK and is also set to auto increment.
MySQL and other implementation do not allow value '0' for such cases.
ToDo: Identify a less hacky mechanism to create default experiment 0
"""
table = SqlExperiment.__tablename__
default_experiment = {
SqlExperiment.experiment_id.name: int(SqlAlchemyStore.DEFAULT_EXPERIMENT_ID),
SqlExperiment.name.name: Experiment.DEFAULT_EXPERIMENT_NAME,
SqlExperiment.artifact_location.name: str(self._get_artifact_location(0)),
SqlExperiment.lifecycle_stage.name: LifecycleStage.ACTIVE,
}
def decorate(s):
if isinstance(s, str):
return "'{}'".format(s)
else:
return "{}".format(s)
# Get a list of keys to ensure we have a deterministic ordering
columns = list(default_experiment.keys())
values = ", ".join([decorate(default_experiment.get(c)) for c in columns])
try:
self._set_no_auto_for_zero_values(session)
session.execute(
"INSERT INTO {} ({}) VALUES ({});".format(table, ", ".join(columns), values)
)
finally:
self._unset_no_auto_for_zero_values(session)
|
https://github.com/mlflow/mlflow/issues/1748
|
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1244, in _execute_context
cursor, statement, parameters, context
File "/usr/local/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 552, in do_execute
cursor.execute(statement, parameters)
pyodbc.IntegrityError: ('23000', "[23000] [Microsoft][ODBC Driver 17 for SQL Server][SQL Server]Cannot insert explicit value for identity column in table 'experiments' when IDENTITY_INSERT is set to OFF. (544) (SQLExecDirectW)")
|
pyodbc.IntegrityError
|
def _create_dockerfile(output_path, mlflow_path=None):
"""
Creates a Dockerfile containing additional Docker build steps to execute
when building the Azure container image. These build steps perform the following tasks:
- Install MLflow
:param output_path: The path where the Dockerfile will be written.
:param mlflow_path: Path to a local copy of the MLflow GitHub repository. If specified, the
Dockerfile command for MLflow installation will install MLflow from this
directory. Otherwise, it will install MLflow from pip.
"""
docker_cmds = ["RUN apt-get update && apt-get install -y default-jre"]
docker_cmds.append("RUN pip install azureml-sdk")
if mlflow_path is not None:
mlflow_install_cmd = "RUN pip install -e {mlflow_path}".format(
mlflow_path=_get_container_path(mlflow_path)
)
elif not mlflow_version.endswith("dev"):
mlflow_install_cmd = "RUN pip install mlflow=={mlflow_version}".format(
mlflow_version=mlflow_version
)
else:
raise MlflowException(
"You are running a 'dev' version of MLflow: `{mlflow_version}` that cannot be"
" installed from pip. In order to build a container image, either specify the"
" path to a local copy of the MLflow GitHub repository using the `mlflow_home`"
" parameter or install a release version of MLflow from pip".format(
mlflow_version=mlflow_version
)
)
docker_cmds.append(mlflow_install_cmd)
with open(output_path, "w") as f:
f.write("\n".join(docker_cmds))
|
def _create_dockerfile(output_path, mlflow_path=None):
"""
Creates a Dockerfile containing additional Docker build steps to execute
when building the Azure container image. These build steps perform the following tasks:
- Install MLflow
:param output_path: The path where the Dockerfile will be written.
:param mlflow_path: Path to a local copy of the MLflow GitHub repository. If specified, the
Dockerfile command for MLflow installation will install MLflow from this
directory. Otherwise, it will install MLflow from pip.
"""
docker_cmds = ["RUN pip install azureml-sdk"]
if mlflow_path is not None:
mlflow_install_cmd = "RUN pip install -e {mlflow_path}".format(
mlflow_path=_get_container_path(mlflow_path)
)
elif not mlflow_version.endswith("dev"):
mlflow_install_cmd = "RUN pip install mlflow=={mlflow_version}".format(
mlflow_version=mlflow_version
)
else:
raise MlflowException(
"You are running a 'dev' version of MLflow: `{mlflow_version}` that cannot be"
" installed from pip. In order to build a container image, either specify the"
" path to a local copy of the MLflow GitHub repository using the `mlflow_home`"
" parameter or install a release version of MLflow from pip".format(
mlflow_version=mlflow_version
)
)
docker_cmds.append(mlflow_install_cmd)
with open(output_path, "w") as f:
f.write("\n".join(docker_cmds))
|
https://github.com/mlflow/mlflow/issues/1750
|
2019-08-16T16:43:37,939683552+00:00 - iot-server/run
2019-08-16T16:43:37,940440658+00:00 - gunicorn/run
2019-08-16T16:43:37,941200563+00:00 - rsyslog/run
2019-08-16T16:43:38,137124526+00:00 - nginx/run
EdgeHubConnectionString and IOTEDGE_IOTHUBHOSTNAME are not set. Exiting...
2019-08-16T16:43:40,241560271+00:00 - iot-server/finish 1 0
2019-08-16T16:43:40,245790300+00:00 - Exit code 1 is normal. Not restarting iot-server.
Starting gunicorn 19.6.0
Listening at: http://127.0.0.1:31311 (11)
Using worker: sync
worker timeout is set to 300
Booting worker with pid: 43
Initializing logger
Starting up app insights client
Starting up request id generator
Starting up app insight hooks
Invoking user's init function
2019-08-16 16:44:36,744 | azureml.core.run | DEBUG | Could not load run context RunEnvironmentException:
Message: Could not load a submitted run, if outside of an execution context, use experiment.start_logging to initialize an azureml.core.Run.
InnerException None
ErrorResponse {"error": {"message": "Could not load a submitted run, if outside of an execution context, use experiment.start_logging to initialize an azureml.core.Run."}}, switching offline: False
2019-08-16 16:44:36,744 | azureml.core.run | DEBUG | Could not load the run context and allow_offline set to False
2019-08-16 16:44:36,744 | azureml.core.model | DEBUG | RunEnvironmentException: RunEnvironmentException:
Message: Could not load a submitted run, if outside of an execution context, use experiment.start_logging to initialize an azureml.core.Run.
InnerException RunEnvironmentException:
Message: Could not load a submitted run, if outside of an execution context, use experiment.start_logging to initialize an azureml.core.Run.
InnerException None
ErrorResponse {"error": {"message": "Could not load a submitted run, if outside of an execution context, use experiment.start_logging to initialize an azureml.core.Run."}}
ErrorResponse {"error": {"message": "Could not load a submitted run, if outside of an execution context, use experiment.start_logging to initialize an azureml.core.Run."}}
2019-08-16 16:44:36,745 | azureml.core.model | DEBUG | Using passed in version 1
2019-08-16 16:44:36,745 | azureml.core.model | DEBUG | Found model path at azureml-models/mlflow-y4ct2veitj6uszrg9sjjfq/1/model2
2019/08/16 16:44:36 WARNING mlflow.pyfunc: The version of Python that the model was saved in, `Python 3.7.3`, differs from the version of Python that is currently running, `Python 3.6.8`, and may be incompatible
JAVA_HOME is not set
User's init function failed
Encountered Exception Traceback (most recent call last):
File "/var/azureml-server/aml_blueprint.py", line 162, in register
main.init()
File "/var/azureml-app/main.py", line 88, in init
driver_module.init()
File "execution_script.py", line 12, in init
model = load_pyfunc(model_path)
File "/opt/miniconda/lib/python3.6/site-packages/mlflow/pyfunc/__init__.py", line 314, in load_pyfunc
return importlib.import_module(conf[MAIN])._load_pyfunc(data_path)
File "/opt/miniconda/lib/python3.6/site-packages/mlflow/spark.py", line 383, in _load_pyfunc
.master("local[1]").getOrCreate()
File "/opt/miniconda/lib/python3.6/site-packages/pyspark/sql/session.py", line 173, in getOrCreate
sc = SparkContext.getOrCreate(sparkConf)
File "/opt/miniconda/lib/python3.6/site-packages/pyspark/context.py", line 367, in getOrCreate
SparkContext(conf=conf or SparkConf())
File "/opt/miniconda/lib/python3.6/site-packages/pyspark/context.py", line 133, in __init__
SparkContext._ensure_initialized(self, gateway=gateway, conf=conf)
File "/opt/miniconda/lib/python3.6/site-packages/pyspark/context.py", line 316, in _ensure_initialized
SparkContext._gateway = gateway or launch_gateway(conf)
File "/opt/miniconda/lib/python3.6/site-packages/pyspark/java_gateway.py", line 46, in launch_gateway
return _launch_gateway(conf)
File "/opt/miniconda/lib/python3.6/site-packages/pyspark/java_gateway.py", line 108, in _launch_gateway
raise Exception("Java gateway process exited before sending its port number")
Exception: Java gateway process exited before sending its port number
Worker exiting (pid: 43)
Shutting down: Master
Reason: Worker failed to boot.
2019-08-16T16:44:41,539825892+00:00 - gunicorn/finish 3 0
2019-08-16T16:44:41,541275102+00:00 - Exit code 3 is not normal. Killing image.
|
Exception
|
def autolog():
"""
Enable automatic logging from TensorFlow to MLflow.
Logs loss and any other metrics specified in the fit
function, and optimizer data as parameters. Model checkpoints
are logged as artifacts to a 'models' directory.
"""
import keras
class __MLflowKerasCallback(keras.callbacks.Callback):
"""
Callback for auto-logging metrics and parameters.
Records available logs after each epoch.
Records model structural information as params after training finishes.
"""
def on_epoch_end(self, epoch, logs=None):
if not logs:
return
try_mlflow_log(mlflow.log_metrics, logs, step=epoch)
def on_train_end(self, logs=None):
try_mlflow_log(mlflow.log_param, "num_layers", len(self.model.layers))
try_mlflow_log(
mlflow.log_param, "optimizer_name", type(self.model.optimizer).__name__
)
if hasattr(self.model.optimizer, "lr"):
lr = (
self.model.optimizer.lr
if type(self.model.optimizer.lr) is float
else keras.backend.eval(self.model.optimizer.lr)
)
try_mlflow_log(mlflow.log_param, "learning_rate", lr)
if hasattr(self.model.optimizer, "epsilon"):
epsilon = (
self.model.optimizer.epsilon
if type(self.model.optimizer.epsilon) is float
else keras.backend.eval(self.model.optimizer.epsilon)
)
try_mlflow_log(mlflow.log_param, "epsilon", epsilon)
sum_list = []
self.model.summary(print_fn=sum_list.append)
summary = "\n".join(sum_list)
try_mlflow_log(mlflow.set_tag, "summary", summary)
try_mlflow_log(log_model, self.model, artifact_path="model")
@gorilla.patch(keras.Model)
def fit(self, *args, **kwargs):
original = gorilla.get_original_attribute(keras.Model, "fit")
if len(args) >= 6:
l = list(args)
l[5] += [__MLflowKerasCallback()]
args = tuple(l)
elif "callbacks" in kwargs:
kwargs["callbacks"] += [__MLflowKerasCallback()]
else:
kwargs["callbacks"] = [__MLflowKerasCallback()]
return original(self, *args, **kwargs)
settings = gorilla.Settings(allow_hit=True, store_hit=True)
patch = gorilla.Patch(keras.Model, "fit", fit, settings=settings)
gorilla.apply(patch)
|
def autolog():
"""
Enable automatic logging from TensorFlow to MLflow.
Logs loss and any other metrics specified in the fit
function, and optimizer data as parameters. Model checkpoints
are logged as artifacts to a 'models' directory.
"""
import keras
class __MLflowKerasCallback(keras.callbacks.Callback):
"""
Callback for auto-logging metrics and parameters.
Records available logs after each epoch.
Records model structural information as params after training finishes.
"""
def on_epoch_end(self, epoch, logs=None):
if not logs:
return
try:
mlflow.log_metrics(logs, step=epoch)
except mlflow.exceptions.MlflowException as e:
warnings.warn("Logging to MLflow failed: " + str(e))
def on_train_end(self, logs=None):
try:
mlflow.log_param("num_layers", len(self.model.layers))
mlflow.log_param("optimizer_name", type(self.model.optimizer).__name__)
if hasattr(self.model.optimizer, "lr"):
lr = (
self.model.optimizer.lr
if type(self.model.optimizer.lr) is float
else keras.backend.eval(self.model.optimizer.lr)
)
mlflow.log_param("learning_rate", lr)
if hasattr(self.model.optimizer, "epsilon"):
epsilon = (
self.model.optimizer.epsilon
if type(self.model.optimizer.epsilon) is float
else keras.backend.eval(self.model.optimizer.epsilon)
)
mlflow.log_param("epsilon", epsilon)
sum_list = []
self.model.summary(print_fn=sum_list.append)
summary = "\n".join(sum_list)
mlflow.set_tag("summary", summary)
log_model(self.model, artifact_path="model")
except mlflow.exceptions.MlflowException as e:
warnings.warn("Logging to Mlflow failed: " + str(e))
@gorilla.patch(keras.Model)
def fit(self, *args, **kwargs):
original = gorilla.get_original_attribute(keras.Model, "fit")
if len(args) >= 6:
l = list(args)
l[5] += [__MLflowKerasCallback()]
args = tuple(l)
elif "callbacks" in kwargs:
kwargs["callbacks"] += [__MLflowKerasCallback()]
else:
kwargs["callbacks"] = [__MLflowKerasCallback()]
return original(self, *args, **kwargs)
settings = gorilla.Settings(allow_hit=True, store_hit=True)
patch = gorilla.Patch(keras.Model, "fit", fit, settings=settings)
gorilla.apply(patch)
|
https://github.com/mlflow/mlflow/issues/1688
|
2019/08/01 18:01:53 ERROR mlflow.server: Exception on /api/2.0/mlflow/runs/set-tag [POST]
Traceback (most recent call last):
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1244, in _execute_context
cursor, statement, parameters, context
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/default.py", line 552, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.StringDataRightTruncation: value too long for type character varying(250)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/mlflow/store/sqlalchemy_store.py", line 148, in make_managed_session
session.commit()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 1027, in commit
self.transaction.commit()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 494, in commit
self._prepare_impl()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 473, in _prepare_impl
self.session.flush()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2459, in flush
self._flush(objects)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2597, in _flush
transaction.rollback(_capture_exception=True)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/langhelpers.py", line 68, in __exit__
compat.reraise(exc_type, exc_value, exc_tb)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 153, in reraise
raise value
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2557, in _flush
flush_context.execute()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 422, in execute
rec.execute(self)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 589, in execute
uow,
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 245, in save_obj
insert,
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 1084, in _emit_insert_statements
c = cached_connections[connection].execute(statement, multiparams)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 988, in execute
return meth(self, multiparams, params)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/sql/elements.py", line 287, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1107, in _execute_clauseelement
distilled_params,
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1248, in _execute_context
e, statement, parameters, cursor, context
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1466, in _handle_dbapi_exception
util.raise_from_cause(sqlalchemy_exception, exc_info)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 398, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 152, in reraise
raise value.with_traceback(tb)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1244, in _execute_context
cursor, statement, parameters, context
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/default.py", line 552, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.DataError: (psycopg2.errors.StringDataRightTruncation) value too long for type character varying(250)
[SQL: INSERT INTO tags (key, value, run_uuid) VALUES (%(key)s, %(value)s, %(run_uuid)s)]
[parameters: {'key': 'summary', 'value': '_________________________________________________________________\nLayer (type) Output Shape Param # \n=============== ... (713 characters truncated) ... =======\nTotal params: 536,110\nTrainable params: 536,110\nNon-trainable params: 0\n_________________________________________________________________', 'run_uuid': ‘123f3e6ce63d41e9ba51644694b4a37f'}]
(Background on this error at: http://sqlalche.me/e/9h9h)
|
sqlalchemy.exc.DataError
|
def on_epoch_end(self, epoch, logs=None):
if not logs:
return
try_mlflow_log(mlflow.log_metrics, logs, step=epoch)
|
def on_epoch_end(self, epoch, logs=None):
if not logs:
return
try:
mlflow.log_metrics(logs, step=epoch)
except mlflow.exceptions.MlflowException as e:
warnings.warn("Logging to MLflow failed: " + str(e))
|
https://github.com/mlflow/mlflow/issues/1688
|
2019/08/01 18:01:53 ERROR mlflow.server: Exception on /api/2.0/mlflow/runs/set-tag [POST]
Traceback (most recent call last):
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1244, in _execute_context
cursor, statement, parameters, context
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/default.py", line 552, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.StringDataRightTruncation: value too long for type character varying(250)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/mlflow/store/sqlalchemy_store.py", line 148, in make_managed_session
session.commit()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 1027, in commit
self.transaction.commit()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 494, in commit
self._prepare_impl()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 473, in _prepare_impl
self.session.flush()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2459, in flush
self._flush(objects)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2597, in _flush
transaction.rollback(_capture_exception=True)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/langhelpers.py", line 68, in __exit__
compat.reraise(exc_type, exc_value, exc_tb)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 153, in reraise
raise value
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2557, in _flush
flush_context.execute()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 422, in execute
rec.execute(self)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 589, in execute
uow,
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 245, in save_obj
insert,
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 1084, in _emit_insert_statements
c = cached_connections[connection].execute(statement, multiparams)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 988, in execute
return meth(self, multiparams, params)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/sql/elements.py", line 287, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1107, in _execute_clauseelement
distilled_params,
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1248, in _execute_context
e, statement, parameters, cursor, context
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1466, in _handle_dbapi_exception
util.raise_from_cause(sqlalchemy_exception, exc_info)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 398, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 152, in reraise
raise value.with_traceback(tb)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1244, in _execute_context
cursor, statement, parameters, context
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/default.py", line 552, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.DataError: (psycopg2.errors.StringDataRightTruncation) value too long for type character varying(250)
[SQL: INSERT INTO tags (key, value, run_uuid) VALUES (%(key)s, %(value)s, %(run_uuid)s)]
[parameters: {'key': 'summary', 'value': '_________________________________________________________________\nLayer (type) Output Shape Param # \n=============== ... (713 characters truncated) ... =======\nTotal params: 536,110\nTrainable params: 536,110\nNon-trainable params: 0\n_________________________________________________________________', 'run_uuid': ‘123f3e6ce63d41e9ba51644694b4a37f'}]
(Background on this error at: http://sqlalche.me/e/9h9h)
|
sqlalchemy.exc.DataError
|
def on_train_end(self, logs=None):
try_mlflow_log(mlflow.log_param, "num_layers", len(self.model.layers))
try_mlflow_log(
mlflow.log_param, "optimizer_name", type(self.model.optimizer).__name__
)
if hasattr(self.model.optimizer, "lr"):
lr = (
self.model.optimizer.lr
if type(self.model.optimizer.lr) is float
else keras.backend.eval(self.model.optimizer.lr)
)
try_mlflow_log(mlflow.log_param, "learning_rate", lr)
if hasattr(self.model.optimizer, "epsilon"):
epsilon = (
self.model.optimizer.epsilon
if type(self.model.optimizer.epsilon) is float
else keras.backend.eval(self.model.optimizer.epsilon)
)
try_mlflow_log(mlflow.log_param, "epsilon", epsilon)
sum_list = []
self.model.summary(print_fn=sum_list.append)
summary = "\n".join(sum_list)
try_mlflow_log(mlflow.set_tag, "summary", summary)
try_mlflow_log(log_model, self.model, artifact_path="model")
|
def on_train_end(self, logs=None):
try:
mlflow.log_param("num_layers", len(self.model.layers))
mlflow.log_param("optimizer_name", type(self.model.optimizer).__name__)
if hasattr(self.model.optimizer, "lr"):
lr = (
self.model.optimizer.lr
if type(self.model.optimizer.lr) is float
else keras.backend.eval(self.model.optimizer.lr)
)
mlflow.log_param("learning_rate", lr)
if hasattr(self.model.optimizer, "epsilon"):
epsilon = (
self.model.optimizer.epsilon
if type(self.model.optimizer.epsilon) is float
else keras.backend.eval(self.model.optimizer.epsilon)
)
mlflow.log_param("epsilon", epsilon)
sum_list = []
self.model.summary(print_fn=sum_list.append)
summary = "\n".join(sum_list)
mlflow.set_tag("summary", summary)
log_model(self.model, artifact_path="model")
except mlflow.exceptions.MlflowException as e:
warnings.warn("Logging to Mlflow failed: " + str(e))
|
https://github.com/mlflow/mlflow/issues/1688
|
2019/08/01 18:01:53 ERROR mlflow.server: Exception on /api/2.0/mlflow/runs/set-tag [POST]
Traceback (most recent call last):
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1244, in _execute_context
cursor, statement, parameters, context
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/default.py", line 552, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.StringDataRightTruncation: value too long for type character varying(250)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/mlflow/store/sqlalchemy_store.py", line 148, in make_managed_session
session.commit()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 1027, in commit
self.transaction.commit()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 494, in commit
self._prepare_impl()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 473, in _prepare_impl
self.session.flush()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2459, in flush
self._flush(objects)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2597, in _flush
transaction.rollback(_capture_exception=True)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/langhelpers.py", line 68, in __exit__
compat.reraise(exc_type, exc_value, exc_tb)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 153, in reraise
raise value
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2557, in _flush
flush_context.execute()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 422, in execute
rec.execute(self)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 589, in execute
uow,
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 245, in save_obj
insert,
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 1084, in _emit_insert_statements
c = cached_connections[connection].execute(statement, multiparams)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 988, in execute
return meth(self, multiparams, params)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/sql/elements.py", line 287, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1107, in _execute_clauseelement
distilled_params,
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1248, in _execute_context
e, statement, parameters, cursor, context
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1466, in _handle_dbapi_exception
util.raise_from_cause(sqlalchemy_exception, exc_info)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 398, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 152, in reraise
raise value.with_traceback(tb)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1244, in _execute_context
cursor, statement, parameters, context
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/default.py", line 552, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.DataError: (psycopg2.errors.StringDataRightTruncation) value too long for type character varying(250)
[SQL: INSERT INTO tags (key, value, run_uuid) VALUES (%(key)s, %(value)s, %(run_uuid)s)]
[parameters: {'key': 'summary', 'value': '_________________________________________________________________\nLayer (type) Output Shape Param # \n=============== ... (713 characters truncated) ... =======\nTotal params: 536,110\nTrainable params: 536,110\nNon-trainable params: 0\n_________________________________________________________________', 'run_uuid': ‘123f3e6ce63d41e9ba51644694b4a37f'}]
(Background on this error at: http://sqlalche.me/e/9h9h)
|
sqlalchemy.exc.DataError
|
def on_train_end(self, logs=None): # pylint: disable=unused-argument
opt = self.model.optimizer
if hasattr(opt, "optimizer"):
opt = opt.optimizer
try_mlflow_log(mlflow.log_param, "optimizer_name", type(opt).__name__)
if hasattr(opt, "_lr"):
lr = (
opt._lr
if type(opt._lr) is float
else tensorflow.keras.backend.eval(opt._lr)
)
try_mlflow_log(mlflow.log_param("learning_rate", lr))
if hasattr(opt, "_epsilon"):
epsilon = (
opt._epsilon
if type(opt._epsilon) is float
else tensorflow.keras.backend.eval(opt._epsilon)
)
try_mlflow_log(mlflow.log_param, "epsilon", epsilon)
l = []
self.model.summary(print_fn=l.append)
summary = "\n".join(l)
try_mlflow_log(mlflow.set_tag, "summary", summary)
try_mlflow_log(mlflow.keras.log_model, self.model, artifact_path="model")
|
def on_train_end(self, logs=None): # pylint: disable=unused-argument
opt = self.model.optimizer
if hasattr(opt, "optimizer"):
opt = opt.optimizer
mlflow.log_param("optimizer_name", type(opt).__name__)
if hasattr(opt, "_lr"):
lr = (
opt._lr
if type(opt._lr) is float
else tensorflow.keras.backend.eval(opt._lr)
)
mlflow.log_param("learning_rate", lr)
if hasattr(opt, "_epsilon"):
epsilon = (
opt._epsilon
if type(opt._epsilon) is float
else tensorflow.keras.backend.eval(opt._epsilon)
)
mlflow.log_param("epsilon", epsilon)
l = []
self.model.summary(print_fn=l.append)
summary = "\n".join(l)
mlflow.set_tag("summary", summary)
mlflow.keras.log_model(self.model, artifact_path="model")
|
https://github.com/mlflow/mlflow/issues/1688
|
2019/08/01 18:01:53 ERROR mlflow.server: Exception on /api/2.0/mlflow/runs/set-tag [POST]
Traceback (most recent call last):
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1244, in _execute_context
cursor, statement, parameters, context
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/default.py", line 552, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.StringDataRightTruncation: value too long for type character varying(250)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/mlflow/store/sqlalchemy_store.py", line 148, in make_managed_session
session.commit()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 1027, in commit
self.transaction.commit()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 494, in commit
self._prepare_impl()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 473, in _prepare_impl
self.session.flush()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2459, in flush
self._flush(objects)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2597, in _flush
transaction.rollback(_capture_exception=True)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/langhelpers.py", line 68, in __exit__
compat.reraise(exc_type, exc_value, exc_tb)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 153, in reraise
raise value
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2557, in _flush
flush_context.execute()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 422, in execute
rec.execute(self)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 589, in execute
uow,
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 245, in save_obj
insert,
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 1084, in _emit_insert_statements
c = cached_connections[connection].execute(statement, multiparams)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 988, in execute
return meth(self, multiparams, params)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/sql/elements.py", line 287, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1107, in _execute_clauseelement
distilled_params,
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1248, in _execute_context
e, statement, parameters, cursor, context
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1466, in _handle_dbapi_exception
util.raise_from_cause(sqlalchemy_exception, exc_info)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 398, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 152, in reraise
raise value.with_traceback(tb)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1244, in _execute_context
cursor, statement, parameters, context
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/default.py", line 552, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.DataError: (psycopg2.errors.StringDataRightTruncation) value too long for type character varying(250)
[SQL: INSERT INTO tags (key, value, run_uuid) VALUES (%(key)s, %(value)s, %(run_uuid)s)]
[parameters: {'key': 'summary', 'value': '_________________________________________________________________\nLayer (type) Output Shape Param # \n=============== ... (713 characters truncated) ... =======\nTotal params: 536,110\nTrainable params: 536,110\nNon-trainable params: 0\n_________________________________________________________________', 'run_uuid': ‘123f3e6ce63d41e9ba51644694b4a37f'}]
(Background on this error at: http://sqlalche.me/e/9h9h)
|
sqlalchemy.exc.DataError
|
def _log_artifacts_with_warning(**kwargs):
try_mlflow_log(mlflow.log_artifacts, **kwargs)
|
def _log_artifacts_with_warning(**kwargs):
try:
mlflow.log_artifacts(**kwargs)
except MlflowException as e:
warnings.warn("Logging to MLflow failed: " + str(e))
|
https://github.com/mlflow/mlflow/issues/1688
|
2019/08/01 18:01:53 ERROR mlflow.server: Exception on /api/2.0/mlflow/runs/set-tag [POST]
Traceback (most recent call last):
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1244, in _execute_context
cursor, statement, parameters, context
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/default.py", line 552, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.StringDataRightTruncation: value too long for type character varying(250)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/mlflow/store/sqlalchemy_store.py", line 148, in make_managed_session
session.commit()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 1027, in commit
self.transaction.commit()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 494, in commit
self._prepare_impl()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 473, in _prepare_impl
self.session.flush()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2459, in flush
self._flush(objects)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2597, in _flush
transaction.rollback(_capture_exception=True)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/langhelpers.py", line 68, in __exit__
compat.reraise(exc_type, exc_value, exc_tb)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 153, in reraise
raise value
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2557, in _flush
flush_context.execute()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 422, in execute
rec.execute(self)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 589, in execute
uow,
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 245, in save_obj
insert,
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 1084, in _emit_insert_statements
c = cached_connections[connection].execute(statement, multiparams)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 988, in execute
return meth(self, multiparams, params)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/sql/elements.py", line 287, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1107, in _execute_clauseelement
distilled_params,
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1248, in _execute_context
e, statement, parameters, cursor, context
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1466, in _handle_dbapi_exception
util.raise_from_cause(sqlalchemy_exception, exc_info)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 398, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 152, in reraise
raise value.with_traceback(tb)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1244, in _execute_context
cursor, statement, parameters, context
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/default.py", line 552, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.DataError: (psycopg2.errors.StringDataRightTruncation) value too long for type character varying(250)
[SQL: INSERT INTO tags (key, value, run_uuid) VALUES (%(key)s, %(value)s, %(run_uuid)s)]
[parameters: {'key': 'summary', 'value': '_________________________________________________________________\nLayer (type) Output Shape Param # \n=============== ... (713 characters truncated) ... =======\nTotal params: 536,110\nTrainable params: 536,110\nNon-trainable params: 0\n_________________________________________________________________', 'run_uuid': ‘123f3e6ce63d41e9ba51644694b4a37f'}]
(Background on this error at: http://sqlalche.me/e/9h9h)
|
sqlalchemy.exc.DataError
|
def _flush_queue():
"""
Flush the metric queue and log contents in batches to MLflow.
Queue is divided into batches according to run id.
"""
global _metric_queue
client = mlflow.tracking.MlflowClient()
dic = _assoc_list_to_map(_metric_queue)
for key in dic:
try_mlflow_log(client.log_batch, key, metrics=dic[key], params=[], tags=[])
_metric_queue = []
|
def _flush_queue():
"""
Flush the metric queue and log contents in batches to MLflow.
Queue is divided into batches according to run id.
"""
global _metric_queue
try:
client = mlflow.tracking.MlflowClient()
dic = _assoc_list_to_map(_metric_queue)
for key in dic:
client.log_batch(key, metrics=dic[key], params=[], tags=[])
except MlflowException as e:
warnings.warn("Logging to MLflow failed: " + str(e))
finally:
_metric_queue = []
|
https://github.com/mlflow/mlflow/issues/1688
|
2019/08/01 18:01:53 ERROR mlflow.server: Exception on /api/2.0/mlflow/runs/set-tag [POST]
Traceback (most recent call last):
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1244, in _execute_context
cursor, statement, parameters, context
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/default.py", line 552, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.StringDataRightTruncation: value too long for type character varying(250)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/mlflow/store/sqlalchemy_store.py", line 148, in make_managed_session
session.commit()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 1027, in commit
self.transaction.commit()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 494, in commit
self._prepare_impl()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 473, in _prepare_impl
self.session.flush()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2459, in flush
self._flush(objects)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2597, in _flush
transaction.rollback(_capture_exception=True)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/langhelpers.py", line 68, in __exit__
compat.reraise(exc_type, exc_value, exc_tb)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 153, in reraise
raise value
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2557, in _flush
flush_context.execute()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 422, in execute
rec.execute(self)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 589, in execute
uow,
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 245, in save_obj
insert,
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 1084, in _emit_insert_statements
c = cached_connections[connection].execute(statement, multiparams)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 988, in execute
return meth(self, multiparams, params)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/sql/elements.py", line 287, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1107, in _execute_clauseelement
distilled_params,
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1248, in _execute_context
e, statement, parameters, cursor, context
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1466, in _handle_dbapi_exception
util.raise_from_cause(sqlalchemy_exception, exc_info)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 398, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 152, in reraise
raise value.with_traceback(tb)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1244, in _execute_context
cursor, statement, parameters, context
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/default.py", line 552, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.DataError: (psycopg2.errors.StringDataRightTruncation) value too long for type character varying(250)
[SQL: INSERT INTO tags (key, value, run_uuid) VALUES (%(key)s, %(value)s, %(run_uuid)s)]
[parameters: {'key': 'summary', 'value': '_________________________________________________________________\nLayer (type) Output Shape Param # \n=============== ... (713 characters truncated) ... =======\nTotal params: 536,110\nTrainable params: 536,110\nNon-trainable params: 0\n_________________________________________________________________', 'run_uuid': ‘123f3e6ce63d41e9ba51644694b4a37f'}]
(Background on this error at: http://sqlalche.me/e/9h9h)
|
sqlalchemy.exc.DataError
|
def autolog(every_n_iter=100):
# pylint: disable=E0611
"""
Enable automatic logging from TensorFlow to MLflow. If applicable,
model checkpoints are logged as artifacts to a 'models' directory, along
with any TensorBoard log data.
Refer to the tracking documentation for
information on what is logged with different TensorFlow workflows.
:param every_n_iter: The frequency with which metrics should be logged.
Defaults to 100. Ex: a value of 100 will log metrics
at step 0, 100, 200, etc.
"""
global _LOG_EVERY_N_STEPS
_LOG_EVERY_N_STEPS = every_n_iter
from distutils.version import StrictVersion
if StrictVersion(tensorflow.__version__) < StrictVersion("1.12") or StrictVersion(
tensorflow.__version__
) >= StrictVersion("2.0"):
warnings.warn(
"Could not log to MLflow. Only TensorFlow versions"
+ "1.12 <= v < 2.0.0 are supported."
)
return
try:
from tensorflow.python.summary.writer.event_file_writer import EventFileWriter
from tensorflow.python.summary.writer.event_file_writer_v2 import (
EventFileWriterV2,
)
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary.writer.writer import FileWriter
except ImportError:
warnings.warn(
"Could not log to MLflow. Only TensorFlow versions"
+ "1.12 <= v < 2.0.0 are supported."
)
return
@gorilla.patch(tensorflow.estimator.Estimator)
def export_saved_model(self, *args, **kwargs):
original = gorilla.get_original_attribute(
tensorflow.estimator.Estimator, "export_saved_model"
)
serialized = original(self, *args, **kwargs)
try_mlflow_log(
log_model,
tf_saved_model_dir=serialized.decode("utf-8"),
tf_meta_graph_tags=[tag_constants.SERVING],
tf_signature_def_key="predict",
artifact_path="model",
)
return serialized
@gorilla.patch(tensorflow.estimator.Estimator)
def export_savedmodel(self, *args, **kwargs):
original = gorilla.get_original_attribute(
tensorflow.estimator.Estimator, "export_savedmodel"
)
serialized = original(self, *args, **kwargs)
try_mlflow_log(
log_model,
tf_saved_model_dir=serialized.decode("utf-8"),
tf_meta_graph_tags=[tag_constants.SERVING],
tf_signature_def_key="predict",
artifact_path="model",
)
return serialized
@gorilla.patch(tensorflow.keras.Model)
def fit(self, *args, **kwargs):
original = gorilla.get_original_attribute(tensorflow.keras.Model, "fit")
if len(args) >= 6:
l = list(args)
l[5], log_dir = _setup_callbacks(l[5])
args = tuple(l)
elif "callbacks" in kwargs:
kwargs["callbacks"], log_dir = _setup_callbacks(kwargs["callbacks"])
else:
kwargs["callbacks"], log_dir = _setup_callbacks([])
result = original(self, *args, **kwargs)
_flush_queue()
_log_artifacts_with_warning(local_dir=log_dir, artifact_path="tensorboard_logs")
shutil.rmtree(log_dir)
return result
@gorilla.patch(EventFileWriter)
def add_event(self, event):
_log_event(event)
original = gorilla.get_original_attribute(EventFileWriter, "add_event")
return original(self, event)
@gorilla.patch(FileWriter)
def add_summary(self, *args, **kwargs):
original = gorilla.get_original_attribute(FileWriter, "add_summary")
result = original(self, *args, **kwargs)
_flush_queue()
return result
settings = gorilla.Settings(allow_hit=True, store_hit=True)
patches = [
gorilla.Patch(EventFileWriter, "add_event", add_event, settings=settings),
gorilla.Patch(EventFileWriterV2, "add_event", add_event, settings=settings),
gorilla.Patch(tensorflow.keras.Model, "fit", fit, settings=settings),
gorilla.Patch(
tensorflow.estimator.Estimator,
"export_saved_model",
export_saved_model,
settings=settings,
),
gorilla.Patch(
tensorflow.estimator.Estimator,
"export_savedmodel",
export_savedmodel,
settings=settings,
),
gorilla.Patch(FileWriter, "add_summary", add_summary, settings=settings),
]
for x in patches:
gorilla.apply(x)
|
def autolog(every_n_iter=100):
# pylint: disable=E0611
"""
Enable automatic logging from TensorFlow to MLflow. If applicable,
model checkpoints are logged as artifacts to a 'models' directory, along
with any TensorBoard log data.
Refer to the tracking documentation for
information on what is logged with different TensorFlow workflows.
:param every_n_iter: The frequency with which metrics should be logged.
Defaults to 100. Ex: a value of 100 will log metrics
at step 0, 100, 200, etc.
"""
global _LOG_EVERY_N_STEPS
_LOG_EVERY_N_STEPS = every_n_iter
from distutils.version import StrictVersion
if StrictVersion(tensorflow.__version__) < StrictVersion("1.12") or StrictVersion(
tensorflow.__version__
) >= StrictVersion("2.0"):
warnings.warn(
"Could not log to MLflow. Only TensorFlow versions"
+ "1.12 <= v < 2.0.0 are supported."
)
return
try:
from tensorflow.python.summary.writer.event_file_writer import EventFileWriter
from tensorflow.python.summary.writer.event_file_writer_v2 import (
EventFileWriterV2,
)
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary.writer.writer import FileWriter
except ImportError:
warnings.warn(
"Could not log to MLflow. Only TensorFlow versions"
+ "1.12 <= v < 2.0.0 are supported."
)
return
@gorilla.patch(tensorflow.estimator.Estimator)
def export_saved_model(self, *args, **kwargs):
original = gorilla.get_original_attribute(
tensorflow.estimator.Estimator, "export_saved_model"
)
serialized = original(self, *args, **kwargs)
try:
log_model(
tf_saved_model_dir=serialized.decode("utf-8"),
tf_meta_graph_tags=[tag_constants.SERVING],
tf_signature_def_key="predict",
artifact_path="model",
)
except MlflowException as e:
warnings.warn("Logging to MLflow failed: " + str(e))
return serialized
@gorilla.patch(tensorflow.estimator.Estimator)
def export_savedmodel(self, *args, **kwargs):
original = gorilla.get_original_attribute(
tensorflow.estimator.Estimator, "export_savedmodel"
)
serialized = original(self, *args, **kwargs)
try:
log_model(
tf_saved_model_dir=serialized.decode("utf-8"),
tf_meta_graph_tags=[tag_constants.SERVING],
tf_signature_def_key="predict",
artifact_path="model",
)
except MlflowException as e:
warnings.warn("Logging to MLflow failed: " + str(e))
return serialized
@gorilla.patch(tensorflow.keras.Model)
def fit(self, *args, **kwargs):
original = gorilla.get_original_attribute(tensorflow.keras.Model, "fit")
if len(args) >= 6:
l = list(args)
l[5], log_dir = _setup_callbacks(l[5])
args = tuple(l)
elif "callbacks" in kwargs:
kwargs["callbacks"], log_dir = _setup_callbacks(kwargs["callbacks"])
else:
kwargs["callbacks"], log_dir = _setup_callbacks([])
result = original(self, *args, **kwargs)
_flush_queue()
_log_artifacts_with_warning(local_dir=log_dir, artifact_path="tensorboard_logs")
shutil.rmtree(log_dir)
return result
@gorilla.patch(EventFileWriter)
def add_event(self, event):
_log_event(event)
original = gorilla.get_original_attribute(EventFileWriter, "add_event")
return original(self, event)
@gorilla.patch(FileWriter)
def add_summary(self, *args, **kwargs):
original = gorilla.get_original_attribute(FileWriter, "add_summary")
result = original(self, *args, **kwargs)
_flush_queue()
return result
settings = gorilla.Settings(allow_hit=True, store_hit=True)
patches = [
gorilla.Patch(EventFileWriter, "add_event", add_event, settings=settings),
gorilla.Patch(EventFileWriterV2, "add_event", add_event, settings=settings),
gorilla.Patch(tensorflow.keras.Model, "fit", fit, settings=settings),
gorilla.Patch(
tensorflow.estimator.Estimator,
"export_saved_model",
export_saved_model,
settings=settings,
),
gorilla.Patch(
tensorflow.estimator.Estimator,
"export_savedmodel",
export_savedmodel,
settings=settings,
),
gorilla.Patch(FileWriter, "add_summary", add_summary, settings=settings),
]
for x in patches:
gorilla.apply(x)
|
https://github.com/mlflow/mlflow/issues/1688
|
2019/08/01 18:01:53 ERROR mlflow.server: Exception on /api/2.0/mlflow/runs/set-tag [POST]
Traceback (most recent call last):
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1244, in _execute_context
cursor, statement, parameters, context
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/default.py", line 552, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.StringDataRightTruncation: value too long for type character varying(250)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/mlflow/store/sqlalchemy_store.py", line 148, in make_managed_session
session.commit()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 1027, in commit
self.transaction.commit()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 494, in commit
self._prepare_impl()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 473, in _prepare_impl
self.session.flush()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2459, in flush
self._flush(objects)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2597, in _flush
transaction.rollback(_capture_exception=True)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/langhelpers.py", line 68, in __exit__
compat.reraise(exc_type, exc_value, exc_tb)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 153, in reraise
raise value
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2557, in _flush
flush_context.execute()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 422, in execute
rec.execute(self)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 589, in execute
uow,
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 245, in save_obj
insert,
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 1084, in _emit_insert_statements
c = cached_connections[connection].execute(statement, multiparams)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 988, in execute
return meth(self, multiparams, params)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/sql/elements.py", line 287, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1107, in _execute_clauseelement
distilled_params,
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1248, in _execute_context
e, statement, parameters, cursor, context
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1466, in _handle_dbapi_exception
util.raise_from_cause(sqlalchemy_exception, exc_info)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 398, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 152, in reraise
raise value.with_traceback(tb)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1244, in _execute_context
cursor, statement, parameters, context
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/default.py", line 552, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.DataError: (psycopg2.errors.StringDataRightTruncation) value too long for type character varying(250)
[SQL: INSERT INTO tags (key, value, run_uuid) VALUES (%(key)s, %(value)s, %(run_uuid)s)]
[parameters: {'key': 'summary', 'value': '_________________________________________________________________\nLayer (type) Output Shape Param # \n=============== ... (713 characters truncated) ... =======\nTotal params: 536,110\nTrainable params: 536,110\nNon-trainable params: 0\n_________________________________________________________________', 'run_uuid': ‘123f3e6ce63d41e9ba51644694b4a37f'}]
(Background on this error at: http://sqlalche.me/e/9h9h)
|
sqlalchemy.exc.DataError
|
def export_saved_model(self, *args, **kwargs):
original = gorilla.get_original_attribute(
tensorflow.estimator.Estimator, "export_saved_model"
)
serialized = original(self, *args, **kwargs)
try_mlflow_log(
log_model,
tf_saved_model_dir=serialized.decode("utf-8"),
tf_meta_graph_tags=[tag_constants.SERVING],
tf_signature_def_key="predict",
artifact_path="model",
)
return serialized
|
def export_saved_model(self, *args, **kwargs):
original = gorilla.get_original_attribute(
tensorflow.estimator.Estimator, "export_saved_model"
)
serialized = original(self, *args, **kwargs)
try:
log_model(
tf_saved_model_dir=serialized.decode("utf-8"),
tf_meta_graph_tags=[tag_constants.SERVING],
tf_signature_def_key="predict",
artifact_path="model",
)
except MlflowException as e:
warnings.warn("Logging to MLflow failed: " + str(e))
return serialized
|
https://github.com/mlflow/mlflow/issues/1688
|
2019/08/01 18:01:53 ERROR mlflow.server: Exception on /api/2.0/mlflow/runs/set-tag [POST]
Traceback (most recent call last):
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1244, in _execute_context
cursor, statement, parameters, context
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/default.py", line 552, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.StringDataRightTruncation: value too long for type character varying(250)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/mlflow/store/sqlalchemy_store.py", line 148, in make_managed_session
session.commit()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 1027, in commit
self.transaction.commit()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 494, in commit
self._prepare_impl()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 473, in _prepare_impl
self.session.flush()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2459, in flush
self._flush(objects)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2597, in _flush
transaction.rollback(_capture_exception=True)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/langhelpers.py", line 68, in __exit__
compat.reraise(exc_type, exc_value, exc_tb)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 153, in reraise
raise value
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2557, in _flush
flush_context.execute()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 422, in execute
rec.execute(self)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 589, in execute
uow,
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 245, in save_obj
insert,
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 1084, in _emit_insert_statements
c = cached_connections[connection].execute(statement, multiparams)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 988, in execute
return meth(self, multiparams, params)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/sql/elements.py", line 287, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1107, in _execute_clauseelement
distilled_params,
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1248, in _execute_context
e, statement, parameters, cursor, context
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1466, in _handle_dbapi_exception
util.raise_from_cause(sqlalchemy_exception, exc_info)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 398, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 152, in reraise
raise value.with_traceback(tb)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1244, in _execute_context
cursor, statement, parameters, context
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/default.py", line 552, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.DataError: (psycopg2.errors.StringDataRightTruncation) value too long for type character varying(250)
[SQL: INSERT INTO tags (key, value, run_uuid) VALUES (%(key)s, %(value)s, %(run_uuid)s)]
[parameters: {'key': 'summary', 'value': '_________________________________________________________________\nLayer (type) Output Shape Param # \n=============== ... (713 characters truncated) ... =======\nTotal params: 536,110\nTrainable params: 536,110\nNon-trainable params: 0\n_________________________________________________________________', 'run_uuid': ‘123f3e6ce63d41e9ba51644694b4a37f'}]
(Background on this error at: http://sqlalche.me/e/9h9h)
|
sqlalchemy.exc.DataError
|
def export_savedmodel(self, *args, **kwargs):
original = gorilla.get_original_attribute(
tensorflow.estimator.Estimator, "export_savedmodel"
)
serialized = original(self, *args, **kwargs)
try_mlflow_log(
log_model,
tf_saved_model_dir=serialized.decode("utf-8"),
tf_meta_graph_tags=[tag_constants.SERVING],
tf_signature_def_key="predict",
artifact_path="model",
)
return serialized
|
def export_savedmodel(self, *args, **kwargs):
original = gorilla.get_original_attribute(
tensorflow.estimator.Estimator, "export_savedmodel"
)
serialized = original(self, *args, **kwargs)
try:
log_model(
tf_saved_model_dir=serialized.decode("utf-8"),
tf_meta_graph_tags=[tag_constants.SERVING],
tf_signature_def_key="predict",
artifact_path="model",
)
except MlflowException as e:
warnings.warn("Logging to MLflow failed: " + str(e))
return serialized
|
https://github.com/mlflow/mlflow/issues/1688
|
2019/08/01 18:01:53 ERROR mlflow.server: Exception on /api/2.0/mlflow/runs/set-tag [POST]
Traceback (most recent call last):
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1244, in _execute_context
cursor, statement, parameters, context
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/default.py", line 552, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.StringDataRightTruncation: value too long for type character varying(250)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/mlflow/store/sqlalchemy_store.py", line 148, in make_managed_session
session.commit()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 1027, in commit
self.transaction.commit()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 494, in commit
self._prepare_impl()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 473, in _prepare_impl
self.session.flush()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2459, in flush
self._flush(objects)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2597, in _flush
transaction.rollback(_capture_exception=True)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/langhelpers.py", line 68, in __exit__
compat.reraise(exc_type, exc_value, exc_tb)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 153, in reraise
raise value
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2557, in _flush
flush_context.execute()
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 422, in execute
rec.execute(self)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 589, in execute
uow,
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 245, in save_obj
insert,
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 1084, in _emit_insert_statements
c = cached_connections[connection].execute(statement, multiparams)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 988, in execute
return meth(self, multiparams, params)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/sql/elements.py", line 287, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1107, in _execute_clauseelement
distilled_params,
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1248, in _execute_context
e, statement, parameters, cursor, context
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1466, in _handle_dbapi_exception
util.raise_from_cause(sqlalchemy_exception, exc_info)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 398, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 152, in reraise
raise value.with_traceback(tb)
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1244, in _execute_context
cursor, statement, parameters, context
File "/home/myserver/mlflow_env/lib/python3.6/site-packages/sqlalchemy/engine/default.py", line 552, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.DataError: (psycopg2.errors.StringDataRightTruncation) value too long for type character varying(250)
[SQL: INSERT INTO tags (key, value, run_uuid) VALUES (%(key)s, %(value)s, %(run_uuid)s)]
[parameters: {'key': 'summary', 'value': '_________________________________________________________________\nLayer (type) Output Shape Param # \n=============== ... (713 characters truncated) ... =======\nTotal params: 536,110\nTrainable params: 536,110\nNon-trainable params: 0\n_________________________________________________________________', 'run_uuid': ‘123f3e6ce63d41e9ba51644694b4a37f'}]
(Background on this error at: http://sqlalche.me/e/9h9h)
|
sqlalchemy.exc.DataError
|
def spark_udf(spark, model_uri, result_type="double"):
"""
A Spark UDF that can be used to invoke the Python function formatted model.
Parameters passed to the UDF are forwarded to the model as a DataFrame where the names are
ordinals (0, 1, ...).
The predictions are filtered to contain only the columns that can be represented as the
``result_type``. If the ``result_type`` is string or array of strings, all predictions are
converted to string. If the result type is not an array type, the left most column with
matching type is returned.
>>> predict = mlflow.pyfunc.spark_udf(spark, "/my/local/model")
>>> df.withColumn("prediction", predict("name", "age")).show()
:param spark: A SparkSession object.
:param model_uri: The location, in URI format, of the MLflow model with the
:py:mod:`mlflow.pyfunc` flavor. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/tracking.html#
artifact-locations>`_.
:param result_type: the return type of the user-defined function. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string. Only a primitive
type or an array ``pyspark.sql.types.ArrayType`` of primitive type are allowed.
The following classes of result type are supported:
- "int" or ``pyspark.sql.types.IntegerType``: The leftmost integer that can fit in an
``int32`` or an exception if there is none.
- "long" or ``pyspark.sql.types.LongType``: The leftmost long integer that can fit in an
``int64`` or an exception if there is none.
- ``ArrayType(IntegerType|LongType)``: All integer columns that can fit into the requested
size.
- "float" or ``pyspark.sql.types.FloatType``: The leftmost numeric result cast to
``float32`` or an exception if there is none.
- "double" or ``pyspark.sql.types.DoubleType``: The leftmost numeric result cast to
``double`` or an exception if there is none.
- ``ArrayType(FloatType|DoubleType)``: All numeric columns cast to the requested type or
an exception if there are no numeric columns.
- "string" or ``pyspark.sql.types.StringType``: The leftmost column converted to ``string``.
- ``ArrayType(StringType)``: All columns converted to ``string``.
:return: Spark UDF that applies the model's ``predict`` method to the data and returns a
type specified by ``result_type``, which by default is a double.
"""
# Scope Spark import to this method so users don't need pyspark to use non-Spark-related
# functionality.
from mlflow.pyfunc.spark_model_cache import SparkModelCache
from pyspark.sql.functions import pandas_udf
from pyspark.sql.types import _parse_datatype_string
from pyspark.sql.types import ArrayType, DataType
from pyspark.sql.types import (
DoubleType,
IntegerType,
FloatType,
LongType,
StringType,
)
if not isinstance(result_type, DataType):
result_type = _parse_datatype_string(result_type)
elem_type = result_type
if isinstance(elem_type, ArrayType):
elem_type = elem_type.elementType
supported_types = [IntegerType, LongType, FloatType, DoubleType, StringType]
if not any([isinstance(elem_type, x) for x in supported_types]):
raise MlflowException(
message="Invalid result_type '{}'. Result type can only be one of or an array of one "
"of the following types types: {}".format(
str(elem_type), str(supported_types)
),
error_code=INVALID_PARAMETER_VALUE,
)
with TempDir() as local_tmpdir:
local_model_path = _download_artifact_from_uri(
artifact_uri=model_uri, output_path=local_tmpdir.path()
)
archive_path = SparkModelCache.add_local_model(spark, local_model_path)
def predict(*args):
model = SparkModelCache.get_or_load(archive_path)
schema = {str(i): arg for i, arg in enumerate(args)}
# Explicitly pass order of columns to avoid lexicographic ordering (i.e., 10 < 2)
columns = [str(i) for i, _ in enumerate(args)]
pdf = pandas.DataFrame(schema, columns=columns)
result = model.predict(pdf)
if not isinstance(result, pandas.DataFrame):
result = pandas.DataFrame(data=result)
elif type(elem_type) == IntegerType:
result = result.select_dtypes(
[np.byte, np.ubyte, np.short, np.ushort, np.int32]
).astype(np.int32)
elif type(elem_type) == LongType:
result = result.select_dtypes(
[np.byte, np.ubyte, np.short, np.ushort, np.int, np.long]
)
elif type(elem_type) == FloatType:
result = result.select_dtypes(include=(np.number,)).astype(np.float32)
elif type(elem_type) == DoubleType:
result = result.select_dtypes(include=(np.number,)).astype(np.float64)
if len(result.columns) == 0:
raise MlflowException(
message="The the model did not produce any values compatible with the requested "
"type '{}'. Consider requesting udf with StringType or "
"Arraytype(StringType).".format(str(elem_type)),
error_code=INVALID_PARAMETER_VALUE,
)
if type(elem_type) == StringType:
result = result.applymap(str)
if type(result_type) == ArrayType:
return pandas.Series([row[1].values for row in result.iterrows()])
else:
return result[result.columns[0]]
return pandas_udf(predict, result_type)
|
def spark_udf(spark, model_uri, result_type="double"):
"""
A Spark UDF that can be used to invoke the Python function formatted model.
Parameters passed to the UDF are forwarded to the model as a DataFrame where the names are
ordinals (0, 1, ...).
The predictions are filtered to contain only the columns that can be represented as the
``result_type``. If the ``result_type`` is string or array of strings, all predictions are
converted to string. If the result type is not an array type, the left most column with
matching type is returned.
>>> predict = mlflow.pyfunc.spark_udf(spark, "/my/local/model")
>>> df.withColumn("prediction", predict("name", "age")).show()
:param spark: A SparkSession object.
:param model_uri: The location, in URI format, of the MLflow model with the
:py:mod:`mlflow.pyfunc` flavor. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/tracking.html#
artifact-locations>`_.
:param result_type: the return type of the user-defined function. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string. Only a primitive
type or an array ``pyspark.sql.types.ArrayType`` of primitive type are allowed.
The following classes of result type are supported:
- "int" or ``pyspark.sql.types.IntegerType``: The leftmost integer that can fit in an
``int32`` or an exception if there is none.
- "long" or ``pyspark.sql.types.LongType``: The leftmost long integer that can fit in an
``int64`` or an exception if there is none.
- ``ArrayType(IntegerType|LongType)``: All integer columns that can fit into the requested
size.
- "float" or ``pyspark.sql.types.FloatType``: The leftmost numeric result cast to
``float32`` or an exception if there is none.
- "double" or ``pyspark.sql.types.DoubleType``: The leftmost numeric result cast to
``double`` or an exception if there is none.
- ``ArrayType(FloatType|DoubleType)``: All numeric columns cast to the requested type or
an exception if there are no numeric columns.
- "string" or ``pyspark.sql.types.StringType``: The leftmost column converted to ``string``.
- ``ArrayType(StringType)``: All columns converted to ``string``.
:return: Spark UDF that applies the model's ``predict`` method to the data and returns a
type specified by ``result_type``, which by default is a double.
"""
# Scope Spark import to this method so users don't need pyspark to use non-Spark-related
# functionality.
from mlflow.pyfunc.spark_model_cache import SparkModelCache
from pyspark.sql.functions import pandas_udf
from pyspark.sql.types import _parse_datatype_string
from pyspark.sql.types import ArrayType, DataType
from pyspark.sql.types import (
DoubleType,
IntegerType,
FloatType,
LongType,
StringType,
)
if not isinstance(result_type, DataType):
result_type = _parse_datatype_string(result_type)
elem_type = result_type
if isinstance(elem_type, ArrayType):
elem_type = elem_type.elementType
supported_types = [IntegerType, LongType, FloatType, DoubleType, StringType]
if not any([isinstance(elem_type, x) for x in supported_types]):
raise MlflowException(
message="Invalid result_type '{}'. Result type can only be one of or an array of one "
"of the following types types: {}".format(
str(elem_type), str(supported_types)
),
error_code=INVALID_PARAMETER_VALUE,
)
local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)
archive_path = SparkModelCache.add_local_model(spark, local_model_path)
def predict(*args):
model = SparkModelCache.get_or_load(archive_path)
schema = {str(i): arg for i, arg in enumerate(args)}
# Explicitly pass order of columns to avoid lexicographic ordering (i.e., 10 < 2)
columns = [str(i) for i, _ in enumerate(args)]
pdf = pandas.DataFrame(schema, columns=columns)
result = model.predict(pdf)
if not isinstance(result, pandas.DataFrame):
result = pandas.DataFrame(data=result)
elif type(elem_type) == IntegerType:
result = result.select_dtypes(
[np.byte, np.ubyte, np.short, np.ushort, np.int32]
).astype(np.int32)
elif type(elem_type) == LongType:
result = result.select_dtypes(
[np.byte, np.ubyte, np.short, np.ushort, np.int, np.long]
)
elif type(elem_type) == FloatType:
result = result.select_dtypes(include=(np.number,)).astype(np.float32)
elif type(elem_type) == DoubleType:
result = result.select_dtypes(include=(np.number,)).astype(np.float64)
if len(result.columns) == 0:
raise MlflowException(
message="The the model did not produce any values compatible with the requested "
"type '{}'. Consider requesting udf with StringType or "
"Arraytype(StringType).".format(str(elem_type)),
error_code=INVALID_PARAMETER_VALUE,
)
if type(elem_type) == StringType:
result = result.applymap(str)
if type(result_type) == ArrayType:
return pandas.Series([row[1].values for row in result.iterrows()])
else:
return result[result.columns[0]]
return pandas_udf(predict, result_type)
|
https://github.com/mlflow/mlflow/issues/1657
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<command-3584523> in <module>()
1 import mlflow.pyfunc
2
----> 3 trained_model = mlflow.pyfunc.spark_udf(spark, model_uri='runs:/c4569266b782467cacd91c3d0ef8f9fc/model')
/databricks/python/lib/python3.6/site-packages/mlflow/pyfunc/__init__.py in spark_udf(spark, model_uri, result_type)
416
417 local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)
--> 418 archive_path = SparkModelCache.add_local_model(spark, local_model_path)
419
420 def predict(*args):
/databricks/python/lib/python3.6/site-packages/mlflow/pyfunc/spark_model_cache.py in add_local_model(spark, model_path)
35 # NB: We must archive the directory as Spark.addFile does not support non-DFS
36 # directories when recursive=True.
---> 37 archive_path = shutil.make_archive(archive_basepath, 'zip', model_path)
38 spark.sparkContext.addFile(archive_path)
39 return archive_path
/databricks/python/lib/python3.6/shutil.py in make_archive(base_name, format, root_dir, base_dir, verbose, dry_run, owner, group, logger)
798
799 try:
--> 800 filename = func(base_name, base_dir, **kwargs)
801 finally:
802 if root_dir is not None:
/databricks/python/lib/python3.6/shutil.py in _make_zipfile(base_name, base_dir, verbose, dry_run, logger)
690 for name in sorted(dirnames):
691 path = os.path.normpath(os.path.join(dirpath, name))
--> 692 zf.write(path, path)
693 if logger is not None:
694 logger.info("adding '%s'", path)
/databricks/python/lib/python3.6/zipfile.py in write(self, filename, arcname, compress_type)
1592 )
1593
-> 1594 zinfo = ZipInfo.from_file(filename, arcname)
1595
1596 if zinfo.is_dir():
/databricks/python/lib/python3.6/zipfile.py in from_file(cls, filename, arcname)
494 if isdir:
495 arcname += '/'
--> 496 zinfo = cls(arcname, date_time)
497 zinfo.external_attr = (st.st_mode & 0xFFFF) << 16 # Unix attributes
498 if isdir:
/databricks/python/lib/python3.6/zipfile.py in __init__(self, filename, date_time)
336
337 if date_time[0] < 1980:
--> 338 raise ValueError('ZIP does not support timestamps before 1980')
339
340 # Standard values:
ValueError: ZIP does not support timestamps before 1980
|
ValueError
|
def _fetch_project(uri, subdirectory, version, dst_dir, git_username, git_password):
"""
Fetches the project from the uri. Makes sure the uri contains a valid MLproject file.
Returns the working directory for running the project.
"""
# Download a project to the target `dst_dir` from a Git URI or local path.
if _GIT_URI_REGEX.match(uri):
# Use Git to clone the project
_fetch_git_repo(uri, version, dst_dir, git_username, git_password)
else:
if version is not None:
raise ExecutionException(
"Setting a version is only supported for Git project URIs"
)
# TODO: don't copy mlruns directory here
# Note: uri might be equal to dst_dir, e.g. if we're not using a temporary work dir
if uri != dst_dir:
dir_util.copy_tree(src=uri, dst=dst_dir)
# Make sure there is a MLproject file in the specified working directory.
if not os.path.isfile(os.path.join(dst_dir, subdirectory, "MLproject")):
if subdirectory == "":
raise ExecutionException("No MLproject file found in %s" % uri)
else:
raise ExecutionException(
"No MLproject file found in subdirectory %s of %s" % (subdirectory, uri)
)
return os.path.join(dst_dir, subdirectory)
|
def _fetch_project(uri, subdirectory, version, dst_dir, git_username, git_password):
"""
Fetches the project from the uri. Makes sure the uri contains a valid MLproject file.
Returns the working directory for running the project.
"""
# Download a project to the target `dst_dir` from a Git URI or local path.
if _GIT_URI_REGEX.match(uri):
# Use Git to clone the project
_fetch_git_repo(uri, version, dst_dir, git_username, git_password)
else:
if version is not None:
raise ExecutionException(
"Setting a version is only supported for Git project URIs"
)
# TODO: don't copy mlruns directory here
# Note: uri might be equal to dst_dir, e.g. if we're not using a temporary work dir
if uri != dst_dir:
dir_util.copy_tree(src=uri, dst=dst_dir)
# Make sure they don't have an outputs or mlruns directory (will need to change if we change
# how we log results locally)
shutil.rmtree(os.path.join(dst_dir, "outputs"), ignore_errors=True)
shutil.rmtree(os.path.join(dst_dir, "mlruns"), ignore_errors=True)
# Make sure there is a MLproject file in the specified working directory.
if not os.path.isfile(os.path.join(dst_dir, subdirectory, "MLproject")):
if subdirectory == "":
raise ExecutionException("No MLproject file found in %s" % uri)
else:
raise ExecutionException(
"No MLproject file found in subdirectory %s of %s" % (subdirectory, uri)
)
return os.path.join(dst_dir, subdirectory)
|
https://github.com/mlflow/mlflow/issues/147
|
(rr-sample-reg) ip-10-10-180-57:rr-sample-regression arinto$ mlflow run /Users/arinto/repository/github/rr-sample-regression -e main -P lr_feature="Employment rate as pct"
=== Fetching project from /Users/arinto/repository/github/rr-sample-regression ===
=== Work directory for this run: /Users/arinto/repository/github/rr-sample-regression ===
=== Created directory /var/folders/fd/h7tg23rd2p3cx7mnp7j47_gw0000gn/T/tmpyzpx3_w9 for downloading remote URIs passed to arguments of type 'path' ===
Traceback (most recent call last):
File "/Users/arinto/anaconda3/envs/rr-sample-reg/bin/mlflow", line 11, in <module>
sys.exit(cli())
File "/Users/arinto/anaconda3/envs/rr-sample-reg/lib/python3.6/site-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/Users/arinto/anaconda3/envs/rr-sample-reg/lib/python3.6/site-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/Users/arinto/anaconda3/envs/rr-sample-reg/lib/python3.6/site-packages/click/core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/arinto/anaconda3/envs/rr-sample-reg/lib/python3.6/site-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/arinto/anaconda3/envs/rr-sample-reg/lib/python3.6/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/Users/arinto/anaconda3/envs/rr-sample-reg/lib/python3.6/site-packages/mlflow/cli.py", line 108, in run
storage_dir=storage_dir)
File "/Users/arinto/anaconda3/envs/rr-sample-reg/lib/python3.6/site-packages/mlflow/projects.py", line 285, in run
storage_dir=storage_dir, git_username=git_username, git_password=git_password)
File "/Users/arinto/anaconda3/envs/rr-sample-reg/lib/python3.6/site-packages/mlflow/projects.py", line 248, in _run_local
_run_project(project, entry_point, work_dir, parameters, use_conda, storage_dir, experiment_id)
File "/Users/arinto/anaconda3/envs/rr-sample-reg/lib/python3.6/site-packages/mlflow/projects.py", line 415, in _run_project
source_type=SourceType.PROJECT)
File "/Users/arinto/anaconda3/envs/rr-sample-reg/lib/python3.6/site-packages/mlflow/tracking/__init__.py", line 257, in start_run
entry_point_name, source_type)
File "/Users/arinto/anaconda3/envs/rr-sample-reg/lib/python3.6/site-packages/mlflow/tracking/__init__.py", line 222, in _do_start_run
source_version=(source_version or _get_source_version()), tags=[])
File "/Users/arinto/anaconda3/envs/rr-sample-reg/lib/python3.6/site-packages/mlflow/store/file_store.py", line 150, in create_run
if self.get_experiment(experiment_id) is None:
File "/Users/arinto/anaconda3/envs/rr-sample-reg/lib/python3.6/site-packages/mlflow/store/file_store.py", line 116, in get_experiment
raise Exception("Could not find experiment with ID %s" % experiment_id)
Exception: Could not find experiment with ID 1
|
Exception
|
def _metric_tags(self, metric_name, val, sample, scraper_config, hostname=None):
custom_tags = scraper_config["custom_tags"]
_tags = list(custom_tags)
_tags.extend(scraper_config["_metric_tags"])
for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
if label_name not in scraper_config["exclude_labels"]:
tag_name = scraper_config["labels_mapper"].get(label_name, label_name)
_tags.append(
"{}:{}".format(ensure_unicode(tag_name), ensure_unicode(label_value))
)
return self._finalize_tags_to_submit(
_tags, metric_name, val, sample, custom_tags=custom_tags, hostname=hostname
)
|
def _metric_tags(self, metric_name, val, sample, scraper_config, hostname=None):
custom_tags = scraper_config["custom_tags"]
_tags = list(custom_tags)
_tags.extend(scraper_config["_metric_tags"])
for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
if label_name not in scraper_config["exclude_labels"]:
tag_name = scraper_config["labels_mapper"].get(label_name, label_name)
_tags.append("{}:{}".format(tag_name, label_value))
return self._finalize_tags_to_submit(
_tags, metric_name, val, sample, custom_tags=custom_tags, hostname=hostname
)
|
https://github.com/DataDog/integrations-core/issues/2054
|
[ AGENT ] 2018-08-14 19:31:51 UTC | ERROR | (runner.go:277 in work) | Error running check prometheus: [{"message": "'ascii' codec can't encode characters in position 1-4: ordinal not in range(128)", "traceback": "Traceback (most recent call last):\n File \"/opt/datadog-agent/embedded/lib/python2.7/site-packages/datadog_checks/checks/base.py\", line 303, in run\n self.check(copy.deepcopy(self.instances[0]))\n File \"/opt/datadog-agent/embedded/lib/python2.7/site-packages/datadog_checks/checks/prometheus/base_check.py\", line 105, in check\n ignore_unmapped=True\n File \"/opt/datadog-agent/embedded/lib/python2.7/site-packages/datadog_checks/checks/prometheus/mixins.py\", line 385, in process\n self.process_metric(metric, **kwargs)\n File \"/opt/datadog-agent/embedded/lib/python2.7/site-packages/datadog_checks/checks/prometheus/mixins.py\", line 466, in process_metric\n self._submit(message.name, message, send_histograms_buckets, send_monotonic_counter, custom_tags)\n File \"/opt/datadog-agent/embedded/lib/python2.7/site-packages/datadog_checks/checks/prometheus/mixins.py\", line 566, in _submit\n self._submit_gauges_from_histogram(metric_name, metric, send_histograms_buckets, custom_tags, custom_hostname)\n File \"/opt/datadog-agent/embedded/lib/python2.7/site-packages/datadog_checks/checks/prometheus/mixins.py\", line 634, in _submit_gauges_from_histogram\n self._submit_gauge(\"{}.count\".format(name), val, metric, custom_tags)\n File \"/opt/datadog-agent/embedded/lib/python2.7/site-packages/datadog_checks/checks/prometheus/base_check.py\", line 41, in _submit_gauge\n _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)\n File \"/opt/datadog-agent/embedded/lib/python2.7/site-packages/datadog_checks/checks/prometheus/base_check.py\", line 65, in _metric_tags\n _tags.append('{}:{}'.format(tag_name, label.value))\nUnicodeEncodeError: 'ascii' codec can't encode characters in position 1-4: ordinal not in range(128)\n"}]
|
nUnicodeEncodeError
|
def check(self, instance):
config = self.get_config(instance)
if config is None:
return
api = config["api"]
tags = list(config["tags"])
# We access the version of the Vault API corresponding to each instance's `api_url`.
try:
api["check_leader"](config, tags)
api["check_health"](config, tags)
except ApiUnreachable:
return
self.service_check(self.SERVICE_CHECK_CONNECT, AgentCheck.OK, tags=tags)
|
def check(self, instance):
config = self.get_config(instance)
if config is None:
return
tags = list(config["tags"])
# We access the version of the Vault API corresponding to each instance's `api_url`.
try:
config["api"]["check_leader"](config, tags)
config["api"]["check_health"](config, tags)
except ApiUnreachable:
return
self.service_check(self.SERVICE_CHECK_CONNECT, AgentCheck.OK, tags=tags)
|
https://github.com/DataDog/integrations-core/issues/2712
|
Error: string indices must be integers, not str
Traceback (most recent call last):
File "/opt/datadog-agent/embedded/lib/python2.7/site-packages/datadog_checks/base/checks/base.py", line 366, in run
self.check(copy.deepcopy(self.instances[0]))
File "/opt/datadog-agent/embedded/lib/python2.7/site-packages/datadog_checks/vault/vault.py", line 38, in check
config = self.get_config(instance)
File "/opt/datadog-agent/embedded/lib/python2.7/site-packages/datadog_checks/vault/vault.py", line 118, in get_config
config['api'] = self.api_versions.get(api_version, self.DEFAULT_API_VERSION)['functions']
TypeError: string indices must be integers, not str
|
TypeError
|
def get_config(self, instance):
instance_id = hash_mutable(instance)
config = self.config.get(instance_id)
if config is None:
config = {}
try:
api_url = instance["api_url"]
api_version = api_url[-1]
if api_version not in self.api_versions:
self.log.warning(
"Unknown Vault API version `{}`, using version `{}`".format(
api_version, self.DEFAULT_API_VERSION
)
)
api_url = api_url[:-1] + self.DEFAULT_API_VERSION
api_version = self.DEFAULT_API_VERSION
config["api_url"] = api_url
config["api"] = self.api_versions[api_version]["functions"]
except KeyError:
self.log.error("Vault configuration setting `api_url` is required")
return
client_token = instance.get("client_token")
config["headers"] = {"X-Vault-Token": client_token} if client_token else None
username = instance.get("username")
password = instance.get("password")
config["auth"] = (username, password) if username and password else None
ssl_cert = instance.get("ssl_cert")
ssl_private_key = instance.get("ssl_private_key")
if isinstance(ssl_cert, string_types):
if isinstance(ssl_private_key, string_types):
config["ssl_cert"] = (ssl_cert, ssl_private_key)
else:
config["ssl_cert"] = ssl_cert
else:
config["ssl_cert"] = None
if isinstance(instance.get("ssl_ca_cert"), string_types):
config["ssl_verify"] = instance["ssl_ca_cert"]
else:
config["ssl_verify"] = is_affirmative(instance.get("ssl_verify", True))
config["ssl_ignore_warning"] = is_affirmative(
instance.get("ssl_ignore_warning", False)
)
config["proxies"] = self.get_instance_proxy(instance, config["api_url"])
config["timeout"] = int(instance.get("timeout", 20))
config["tags"] = instance.get("tags", [])
# Keep track of the previous cluster leader to detect changes.
config["leader"] = None
config["detect_leader"] = is_affirmative(instance.get("detect_leader"))
self.config[instance_id] = config
return config
|
def get_config(self, instance):
instance_id = hash_mutable(instance)
config = self.config.get(instance_id)
if config is None:
config = {}
try:
api_url = instance["api_url"]
api_version = api_url[-1]
if api_version not in self.api_versions:
self.log.warning(
"Unknown Vault API version `{}`, using version `{}`".format(
api_version, self.DEFAULT_API_VERSION
)
)
config["api_url"] = api_url
config["api"] = self.api_versions.get(
api_version, self.DEFAULT_API_VERSION
)["functions"]
except KeyError:
self.log.error("Vault configuration setting `api_url` is required")
return
client_token = instance.get("client_token")
config["headers"] = {"X-Vault-Token": client_token} if client_token else None
username = instance.get("username")
password = instance.get("password")
config["auth"] = (username, password) if username and password else None
ssl_cert = instance.get("ssl_cert")
ssl_private_key = instance.get("ssl_private_key")
if isinstance(ssl_cert, string_types):
if isinstance(ssl_private_key, string_types):
config["ssl_cert"] = (ssl_cert, ssl_private_key)
else:
config["ssl_cert"] = ssl_cert
else:
config["ssl_cert"] = None
if isinstance(instance.get("ssl_ca_cert"), string_types):
config["ssl_verify"] = instance["ssl_ca_cert"]
else:
config["ssl_verify"] = is_affirmative(instance.get("ssl_verify", True))
config["ssl_ignore_warning"] = is_affirmative(
instance.get("ssl_ignore_warning", False)
)
config["proxies"] = self.get_instance_proxy(instance, config["api_url"])
config["timeout"] = int(instance.get("timeout", 20))
config["tags"] = instance.get("tags", [])
# Keep track of the previous cluster leader to detect changes.
config["leader"] = None
config["detect_leader"] = is_affirmative(instance.get("detect_leader"))
self.config[instance_id] = config
return config
|
https://github.com/DataDog/integrations-core/issues/2712
|
Error: string indices must be integers, not str
Traceback (most recent call last):
File "/opt/datadog-agent/embedded/lib/python2.7/site-packages/datadog_checks/base/checks/base.py", line 366, in run
self.check(copy.deepcopy(self.instances[0]))
File "/opt/datadog-agent/embedded/lib/python2.7/site-packages/datadog_checks/vault/vault.py", line 38, in check
config = self.get_config(instance)
File "/opt/datadog-agent/embedded/lib/python2.7/site-packages/datadog_checks/vault/vault.py", line 118, in get_config
config['api'] = self.api_versions.get(api_version, self.DEFAULT_API_VERSION)['functions']
TypeError: string indices must be integers, not str
|
TypeError
|
def _get_conn(self, instance):
no_cache = is_affirmative(instance.get("disable_connection_cache", False))
key = self._generate_instance_key(instance)
if no_cache or key not in self.connections:
try:
# Only send useful parameters to the redis client constructor
list_params = [
"host",
"port",
"db",
"password",
"socket_timeout",
"connection_pool",
"charset",
"errors",
"unix_socket_path",
"ssl",
"ssl_certfile",
"ssl_keyfile",
"ssl_ca_certs",
"ssl_cert_reqs",
]
# Set a default timeout (in seconds) if no timeout is specified in the instance config
instance["socket_timeout"] = instance.get("socket_timeout", 5)
connection_params = dict(
(k, instance[k]) for k in list_params if k in instance
)
# If caching is disabled, we overwrite the dictionary value so the old connection
# will be closed as soon as the corresponding Python object gets garbage collected
self.connections[key] = redis.Redis(**connection_params)
except TypeError:
msg = "You need a redis library that supports authenticated connections. Try sudo easy_install redis."
raise Exception(msg)
return self.connections[key]
|
def _get_conn(self, instance):
key = self._generate_instance_key(instance)
if key not in self.connections:
try:
# Only send useful parameters to the redis client constructor
list_params = [
"host",
"port",
"db",
"password",
"socket_timeout",
"connection_pool",
"charset",
"errors",
"unix_socket_path",
"ssl",
"ssl_certfile",
"ssl_keyfile",
"ssl_ca_certs",
"ssl_cert_reqs",
]
# Set a default timeout (in seconds) if no timeout is specified in the instance config
instance["socket_timeout"] = instance.get("socket_timeout", 5)
connection_params = dict(
(k, instance[k]) for k in list_params if k in instance
)
self.connections[key] = redis.Redis(**connection_params)
except TypeError:
msg = "You need a redis library that supports authenticated connections. Try sudo easy_install redis."
raise Exception(msg)
return self.connections[key]
|
https://github.com/DataDog/integrations-core/issues/1657
|
2018-06-04 11:08:37 UTC | INFO | dd.collector | config(config.py:1249) | initialized checks.d checks: ['system_core', 'network', 'kubernetes', 'redisdb', 'ntp', 'disk', 'kube_proxy', 'docker_daemon', 'http_check']
2018-06-04 11:08:37 UTC | INFO | dd.collector | config(config.py:1250) | initialization failed checks.d checks: []
2018-06-04 11:08:37 UTC | INFO | dd.collector | collector(agent.py:166) | Check reload was successful. Running 10 checks.
2018-06-04 11:08:51 UTC | ERROR | dd.collector | checks.redisdb(__init__.py:829) | Check 'redisdb' instance #0 failed
Traceback (most recent call last):
File "/opt/datadog-agent/agent/checks/__init__.py", line 812, in run
self.check(copy.deepcopy(instance))
File "/opt/datadog-agent/embedded/lib/python2.7/site-packages/datadog_checks/redisdb/redisdb.py", line 377, in check
self._check_db(instance, custom_tags)
File "/opt/datadog-agent/embedded/lib/python2.7/site-packages/datadog_checks/redisdb/redisdb.py", line 173, in _check_db
info = conn.info()
File "/opt/datadog-agent/embedded/lib/python2.7/site-packages/redis/client.py", line 665, in info
return self.execute_command('INFO')
File "/opt/datadog-agent/embedded/lib/python2.7/site-packages/redis/client.py", line 578, in execute_command
connection.send_command(*args)
File "/opt/datadog-agent/embedded/lib/python2.7/site-packages/redis/connection.py", line 563, in send_command
self.send_packed_command(self.pack_command(*args))
File "/opt/datadog-agent/embedded/lib/python2.7/site-packages/redis/connection.py", line 538, in send_packed_command
self.connect()
File "/opt/datadog-agent/embedded/lib/python2.7/site-packages/redis/connection.py", line 442, in connect
raise ConnectionError(self._error_message(e))
ConnectionError: Error connecting to 10.0.0.4:6379. timed out.
2018-06-04 11:08:52 UTC | INFO | dd.collector | checks.http_check(network_checks.py:93) | Starting Thread Pool
|
ConnectionError
|
def check(self, instance):
service_check_msg = None
offset_threshold = instance.get("offset_threshold", DEFAULT_OFFSET_THRESHOLD)
custom_tags = instance.get("tags", [])
try:
offset_threshold = int(offset_threshold)
except (TypeError, ValueError):
msg = "Must specify an integer value for offset_threshold. Configured value is {}".format(
offset_threshold
)
raise Exception(msg)
req_args = {
"host": instance.get("host", DEFAULT_HOST),
"port": self._get_service_port(instance),
"version": int(instance.get("version", DEFAULT_VERSION)),
"timeout": float(instance.get("timeout", DEFAULT_TIMEOUT)),
}
self.log.debug("Using ntp host: {}".format(req_args["host"]))
try:
ntp_stats = ntplib.NTPClient().request(**req_args)
except ntplib.NTPException:
self.log.debug("Could not connect to NTP Server {}".format(req_args["host"]))
status = AgentCheck.UNKNOWN
ntp_ts = None
else:
ntp_offset = ntp_stats.offset
# Use the ntp server's timestamp for the time of the result in
# case the agent host's clock is messed up.
ntp_ts = ntp_stats.recv_time
self.gauge("ntp.offset", ntp_offset, timestamp=ntp_ts, tags=custom_tags)
if abs(ntp_offset) > offset_threshold:
status = AgentCheck.CRITICAL
service_check_msg = (
"Offset {} secs higher than offset threshold ({} secs)".format(
ntp_offset, offset_threshold
)
)
else:
status = AgentCheck.OK
self.service_check(
"ntp.in_sync",
status,
timestamp=ntp_ts,
message=service_check_msg,
tags=custom_tags,
)
|
def check(self, instance):
service_check_msg = None
offset_threshold = instance.get("offset_threshold", DEFAULT_OFFSET_THRESHOLD)
custom_tags = instance.get("tags", [])
try:
offset_threshold = int(offset_threshold)
except (TypeError, ValueError):
raise Exception(
"Must specify an integer value for offset_threshold. Configured value is %s"
% repr(offset_threshold)
)
req_args = NTPUtil().args
self.log.debug("Using ntp host: {0}".format(req_args["host"]))
try:
ntp_stats = ntplib.NTPClient().request(**req_args)
except ntplib.NTPException:
self.log.debug("Could not connect to NTP Server {0}".format(req_args["host"]))
status = AgentCheck.UNKNOWN
ntp_ts = None
else:
ntp_offset = ntp_stats.offset
# Use the ntp server's timestamp for the time of the result in
# case the agent host's clock is messed up.
ntp_ts = ntp_stats.recv_time
self.gauge("ntp.offset", ntp_offset, timestamp=ntp_ts, tags=custom_tags)
if abs(ntp_offset) > offset_threshold:
status = AgentCheck.CRITICAL
service_check_msg = (
"Offset {0} secs higher than offset threshold ({1} secs)".format(
ntp_offset, offset_threshold
)
)
else:
status = AgentCheck.OK
self.service_check(
"ntp.in_sync",
status,
timestamp=ntp_ts,
message=service_check_msg,
tags=custom_tags,
)
|
https://github.com/DataDog/integrations-core/issues/1569
|
2018-05-17 11:30:31 UTC | INFO | dd.collector | config(config.py:1249) | initialized checks.d checks: ['kube_dns', 'network', 'kubernetes', 'ntp', 'docker_daemon', 'http_check', 'system_core', 'redisdb', 'disk', 'kube_proxy']
2018-05-17 11:30:31 UTC | INFO | dd.collector | config(config.py:1250) | initialization failed checks.d checks: []
2018-05-17 11:30:31 UTC | INFO | dd.collector | collector(agent.py:166) | Check reload was successful. Running 11 checks.
2018-05-17 11:30:35 UTC | ERROR | dd.collector | checks.ntp(__init__.py:829) | Check 'ntp' instance #0 failed
Traceback (most recent call last):
File "/opt/datadog-agent/agent/checks/__init__.py", line 812, in run
self.check(copy.deepcopy(instance))
File "/opt/datadog-agent/embedded/lib/python2.7/site-packages/datadog_checks/ntp/ntp.py", line 33, in check
ntp_stats = ntplib.NTPClient().request(**req_args)
File "/opt/datadog-agent/embedded/lib/python2.7/site-packages/ntplib.py", line 292, in request
addrinfo = socket.getaddrinfo(host, port)[0]
gaierror: [Errno -8] Servname not supported for ai_socktype
|
gaierror
|
def _get_replica_stats(self, db, is_mariadb, replication_channel):
replica_results = defaultdict(dict)
try:
with closing(db.cursor(pymysql.cursors.DictCursor)) as cursor:
if is_mariadb and replication_channel:
cursor.execute(
"SET @@default_master_connection = '{0}';".format(
replication_channel
)
)
cursor.execute("SHOW SLAVE STATUS;")
elif replication_channel:
cursor.execute(
"SHOW SLAVE STATUS FOR CHANNEL '{0}';".format(replication_channel)
)
else:
cursor.execute("SHOW SLAVE STATUS;")
for slave_result in cursor.fetchall():
# MySQL <5.7 does not have Channel_Name.
# For MySQL >=5.7 'Channel_Name' is set to an empty string by default
channel = (
replication_channel or slave_result.get("Channel_Name") or "default"
)
for key, value in iteritems(slave_result):
if value is not None:
replica_results[key]["channel:{0}".format(channel)] = value
except (pymysql.err.InternalError, pymysql.err.OperationalError) as e:
errno, msg = e.args
if errno == 1617 and msg == "There is no master connection '{0}'".format(
replication_channel
):
# MariaDB complains when you try to get slave status with a
# connection name on the master, without connection name it
# responds an empty string as expected.
# Mysql behaves the same with or without connection name.
pass
else:
self.warning(
"Privileges error getting replication status (must grant REPLICATION CLIENT): %s"
% str(e)
)
try:
with closing(db.cursor(pymysql.cursors.DictCursor)) as cursor:
cursor.execute("SHOW MASTER STATUS;")
binlog_results = cursor.fetchone()
if binlog_results:
replica_results.update({"Binlog_enabled": True})
except (pymysql.err.InternalError, pymysql.err.OperationalError) as e:
self.warning(
"Privileges error getting binlog information (must grant REPLICATION CLIENT): %s"
% str(e)
)
return replica_results
|
def _get_replica_stats(self, db, is_mariadb, replication_channel):
replica_results = {}
try:
with closing(db.cursor(pymysql.cursors.DictCursor)) as cursor:
if is_mariadb and replication_channel:
cursor.execute(
"SET @@default_master_connection = '{0}';".format(
replication_channel
)
)
cursor.execute("SHOW SLAVE STATUS;")
elif replication_channel:
cursor.execute(
"SHOW SLAVE STATUS FOR CHANNEL '{0}';".format(replication_channel)
)
else:
cursor.execute("SHOW SLAVE STATUS;")
if replication_channel:
slave_results = cursor.fetchone()
else:
slave_results = cursor.fetchall()
if slave_results:
if replication_channel:
replica_results.update(slave_results)
elif len(slave_results) > 0:
for slave_result in slave_results:
# MySQL <5.7 does not have Channel_Name.
# For MySQL >=5.7 'Channel_Name' is set to an empty string by default
channel = slave_result.get("Channel_Name") or "default"
for key in slave_result:
if slave_result[key] is not None:
if key not in replica_results:
replica_results[key] = {}
replica_results[key]["channel:{0}".format(channel)] = (
slave_result[key]
)
except (pymysql.err.InternalError, pymysql.err.OperationalError) as e:
errno, msg = e.args
if errno == 1617 and msg == "There is no master connection '{0}'".format(
replication_channel
):
# MariaDB complains when you try to get slave status with a
# connection name on the master, without connection name it
# responds an empty string as expected.
# Mysql behaves the same with or without connection name.
pass
else:
self.warning(
"Privileges error getting replication status (must grant REPLICATION CLIENT): %s"
% str(e)
)
try:
with closing(db.cursor(pymysql.cursors.DictCursor)) as cursor:
cursor.execute("SHOW MASTER STATUS;")
binlog_results = cursor.fetchone()
if binlog_results:
replica_results.update({"Binlog_enabled": True})
except (pymysql.err.InternalError, pymysql.err.OperationalError) as e:
self.warning(
"Privileges error getting binlog information (must grant REPLICATION CLIENT): %s"
% str(e)
)
return replica_results
|
https://github.com/DataDog/integrations-core/issues/1127
|
2018-02-16 06:40:03 PST | DEBUG | dd.collector | checks.mysql(mysql.py:702) | Collecting data with performance_schema
2018-02-16 06:40:03 PST | DEBUG | dd.collector | checks.mysql(mysql.py:706) | Collecting done, value OFF
2018-02-16 06:40:03 PST | DEBUG | dd.collector | checks.mysql(mysql.py:653) | MySQL version ['5', '7', '20']
2018-02-16 06:40:03 PST | DEBUG | dd.collector | checks.mysql(mysql.py:702) | Collecting data with Slave_running
2018-02-16 06:40:03 PST | DEBUG | dd.collector | checks.mysql(mysql.py:704) | Slave_running returned None
2018-02-16 06:40:03 PST | DEBUG | dd.collector | checks.mysql(mysql.py:702) | Collecting data with Slaves_connected
2018-02-16 06:40:03 PST | DEBUG | dd.collector | checks.mysql(mysql.py:706) | Collecting done, value 0
2018-02-16 06:40:03 PST | DEBUG | dd.collector | checks.mysql(mysql.py:653) | MySQL version ['5', '7', '20']
2018-02-16 06:40:03 PST | DEBUG | dd.collector | checks.mysql(mysql.py:702) | Collecting data with Slave_IO_Running
2018-02-16 06:40:03 PST | DEBUG | dd.collector | checks.mysql(mysql.py:706) | Collecting done, value Yes
2018-02-16 06:40:03 PST | ERROR | dd.collector | checks.mysql(mysql.py:313) | error!
Traceback (most recent call last):
File "/opt/datadog-agent/embedded/lib/python2.7/site-packages/datadog_checks/mysql/mysql.py", line 306, in check
self._collect_metrics(host, db, tags, options, queries)
File "/opt/datadog-agent/embedded/lib/python2.7/site-packages/datadog_checks/mysql/mysql.py", line 550, in _collect_metrics
slave_io_running = self._collect_type('Slave_IO_Running', results, dict)
File "/opt/datadog-agent/embedded/lib/python2.7/site-packages/datadog_checks/mysql/mysql.py", line 707, in _collect_type
return the_type(dict[key])
ValueError: dictionary update sequence element #0 has length 1; 2 is required
2018-02-16 06:40:03 PST | ERROR | dd.collector | checks.mysql(__init__.py:829) | Check 'mysql' instance #0 failed
Traceback (most recent call last):
File "/opt/datadog-agent/agent/checks/__init__.py", line 812, in run
self.check(copy.deepcopy(instance))
File "/opt/datadog-agent/embedded/lib/python2.7/site-packages/datadog_checks/mysql/mysql.py", line 314, in check
raise e
ValueError: dictionary update sequence element #0 has length 1; 2 is required
|
ValueError
|
async def data(
self,
request,
database,
hash,
table,
default_labels=False,
_next=None,
_size=None,
):
canned_query = await self.ds.get_canned_query(database, table, request.actor)
if canned_query:
return await QueryView(self.ds).data(
request,
database,
hash,
canned_query["sql"],
metadata=canned_query,
editable=False,
canned_query=table,
named_parameters=canned_query.get("params"),
write=bool(canned_query.get("write")),
)
db = self.ds.databases[database]
is_view = bool(await db.get_view_definition(table))
table_exists = bool(await db.table_exists(table))
if not is_view and not table_exists:
raise NotFound(f"Table not found: {table}")
await self.check_permissions(
request,
[
("view-table", (database, table)),
("view-database", database),
"view-instance",
],
)
private = not await self.ds.permission_allowed(
None, "view-table", (database, table), default=True
)
pks = await db.primary_keys(table)
table_column_details = await db.table_column_details(table)
table_columns = [column.name for column in table_column_details]
select_columns = ", ".join(escape_sqlite(t) for t in table_columns)
use_rowid = not pks and not is_view
if use_rowid:
select = f"rowid, {select_columns}"
order_by = "rowid"
order_by_pks = "rowid"
else:
select = select_columns
order_by_pks = ", ".join([escape_sqlite(pk) for pk in pks])
order_by = order_by_pks
if is_view:
order_by = ""
# Ensure we don't drop anything with an empty value e.g. ?name__exact=
args = MultiParams(
urllib.parse.parse_qs(request.query_string, keep_blank_values=True)
)
# Special args start with _ and do not contain a __
# That's so if there is a column that starts with _
# it can still be queried using ?_col__exact=blah
special_args = {}
other_args = []
for key in args:
if key.startswith("_") and "__" not in key:
special_args[key] = args[key]
else:
for v in args.getlist(key):
other_args.append((key, v))
# Handle ?_filter_column and redirect, if present
redirect_params = filters_should_redirect(special_args)
if redirect_params:
return self.redirect(
request,
path_with_added_args(request, redirect_params),
forward_querystring=False,
)
# If ?_sort_by_desc=on (from checkbox) redirect to _sort_desc=(_sort)
if "_sort_by_desc" in special_args:
return self.redirect(
request,
path_with_added_args(
request,
{
"_sort_desc": special_args.get("_sort"),
"_sort_by_desc": None,
"_sort": None,
},
),
forward_querystring=False,
)
table_metadata = self.ds.table_metadata(database, table)
units = table_metadata.get("units", {})
filters = Filters(sorted(other_args), units, ureg)
where_clauses, params = filters.build_where_clauses(table)
extra_wheres_for_ui = []
# Add _where= from querystring
if "_where" in request.args:
if not await self.ds.permission_allowed(
request.actor,
"execute-sql",
resource=database,
default=True,
):
raise DatasetteError("_where= is not allowed", status=403)
else:
where_clauses.extend(request.args.getlist("_where"))
extra_wheres_for_ui = [
{
"text": text,
"remove_url": path_with_removed_args(request, {"_where": text}),
}
for text in request.args.getlist("_where")
]
# Support for ?_through={table, column, value}
extra_human_descriptions = []
if "_through" in request.args:
for through in request.args.getlist("_through"):
through_data = json.loads(through)
through_table = through_data["table"]
other_column = through_data["column"]
value = through_data["value"]
outgoing_foreign_keys = await db.foreign_keys_for_table(through_table)
try:
fk_to_us = [
fk for fk in outgoing_foreign_keys if fk["other_table"] == table
][0]
except IndexError:
raise DatasetteError(
"Invalid _through - could not find corresponding foreign key"
)
param = f"p{len(params)}"
where_clauses.append(
"{our_pk} in (select {our_column} from {through_table} where {other_column} = :{param})".format(
through_table=escape_sqlite(through_table),
our_pk=escape_sqlite(fk_to_us["other_column"]),
our_column=escape_sqlite(fk_to_us["column"]),
other_column=escape_sqlite(other_column),
param=param,
)
)
params[param] = value
extra_human_descriptions.append(
f'{through_table}.{other_column} = "{value}"'
)
# _search support:
fts_table = special_args.get("_fts_table")
fts_table = fts_table or table_metadata.get("fts_table")
fts_table = fts_table or await db.fts_table(table)
fts_pk = special_args.get("_fts_pk", table_metadata.get("fts_pk", "rowid"))
search_args = dict(
pair
for pair in special_args.items()
if pair[0].startswith("_search") and pair[0] != "_searchmode"
)
search = ""
search_mode_raw = special_args.get("_searchmode") == "raw"
if fts_table and search_args:
if "_search" in search_args:
# Simple ?_search=xxx
search = search_args["_search"]
where_clauses.append(
"{fts_pk} in (select rowid from {fts_table} where {fts_table} match {match_clause})".format(
fts_table=escape_sqlite(fts_table),
fts_pk=escape_sqlite(fts_pk),
match_clause=":search"
if search_mode_raw
else "escape_fts(:search)",
)
)
extra_human_descriptions.append(f'search matches "{search}"')
params["search"] = search
else:
# More complex: search against specific columns
for i, (key, search_text) in enumerate(search_args.items()):
search_col = key.split("_search_", 1)[1]
if search_col not in await db.table_columns(fts_table):
raise BadRequest("Cannot search by that column")
where_clauses.append(
"rowid in (select rowid from {fts_table} where {search_col} match {match_clause})".format(
fts_table=escape_sqlite(fts_table),
search_col=escape_sqlite(search_col),
match_clause=":search_{}".format(i)
if search_mode_raw
else "escape_fts(:search_{})".format(i),
)
)
extra_human_descriptions.append(
f'search column "{search_col}" matches "{search_text}"'
)
params[f"search_{i}"] = search_text
sortable_columns = set()
sortable_columns = await self.sortable_columns_for_table(database, table, use_rowid)
# Allow for custom sort order
sort = special_args.get("_sort")
sort_desc = special_args.get("_sort_desc")
if not sort and not sort_desc:
sort = table_metadata.get("sort")
sort_desc = table_metadata.get("sort_desc")
if sort and sort_desc:
raise DatasetteError("Cannot use _sort and _sort_desc at the same time")
if sort:
if sort not in sortable_columns:
raise DatasetteError(f"Cannot sort table by {sort}")
order_by = escape_sqlite(sort)
if sort_desc:
if sort_desc not in sortable_columns:
raise DatasetteError(f"Cannot sort table by {sort_desc}")
order_by = f"{escape_sqlite(sort_desc)} desc"
from_sql = "from {table_name} {where}".format(
table_name=escape_sqlite(table),
where=("where {} ".format(" and ".join(where_clauses)))
if where_clauses
else "",
)
# Copy of params so we can mutate them later:
from_sql_params = dict(**params)
count_sql = f"select count(*) {from_sql}"
_next = _next or special_args.get("_next")
offset = ""
if _next:
if is_view:
# _next is an offset
offset = f" offset {int(_next)}"
else:
components = urlsafe_components(_next)
# If a sort order is applied, the first of these is the sort value
if sort or sort_desc:
sort_value = components[0]
# Special case for if non-urlencoded first token was $null
if _next.split(",")[0] == "$null":
sort_value = None
components = components[1:]
# Figure out the SQL for next-based-on-primary-key first
next_by_pk_clauses = []
if use_rowid:
next_by_pk_clauses.append(f"rowid > :p{len(params)}")
params[f"p{len(params)}"] = components[0]
else:
# Apply the tie-breaker based on primary keys
if len(components) == len(pks):
param_len = len(params)
next_by_pk_clauses.append(compound_keys_after_sql(pks, param_len))
for i, pk_value in enumerate(components):
params[f"p{param_len + i}"] = pk_value
# Now add the sort SQL, which may incorporate next_by_pk_clauses
if sort or sort_desc:
if sort_value is None:
if sort_desc:
# Just items where column is null ordered by pk
where_clauses.append(
"({column} is null and {next_clauses})".format(
column=escape_sqlite(sort_desc),
next_clauses=" and ".join(next_by_pk_clauses),
)
)
else:
where_clauses.append(
"({column} is not null or ({column} is null and {next_clauses}))".format(
column=escape_sqlite(sort),
next_clauses=" and ".join(next_by_pk_clauses),
)
)
else:
where_clauses.append(
"({column} {op} :p{p}{extra_desc_only} or ({column} = :p{p} and {next_clauses}))".format(
column=escape_sqlite(sort or sort_desc),
op=">" if sort else "<",
p=len(params),
extra_desc_only=""
if sort
else " or {column2} is null".format(
column2=escape_sqlite(sort or sort_desc)
),
next_clauses=" and ".join(next_by_pk_clauses),
)
)
params[f"p{len(params)}"] = sort_value
order_by = f"{order_by}, {order_by_pks}"
else:
where_clauses.extend(next_by_pk_clauses)
where_clause = ""
if where_clauses:
where_clause = f"where {' and '.join(where_clauses)} "
if order_by:
order_by = f"order by {order_by} "
extra_args = {}
# Handle ?_size=500
page_size = _size or request.args.get("_size") or table_metadata.get("size")
if page_size:
if page_size == "max":
page_size = self.ds.max_returned_rows
try:
page_size = int(page_size)
if page_size < 0:
raise ValueError
except ValueError:
raise BadRequest("_size must be a positive integer")
if page_size > self.ds.max_returned_rows:
raise BadRequest(f"_size must be <= {self.ds.max_returned_rows}")
extra_args["page_size"] = page_size
else:
page_size = self.ds.page_size
sql_no_limit = "select {select} from {table_name} {where}{order_by}".format(
select=select,
table_name=escape_sqlite(table),
where=where_clause,
order_by=order_by,
)
sql = f"{sql_no_limit.rstrip()} limit {page_size + 1}{offset}"
if request.args.get("_timelimit"):
extra_args["custom_time_limit"] = int(request.args.get("_timelimit"))
results = await db.execute(sql, params, truncate=True, **extra_args)
# Number of filtered rows in whole set:
filtered_table_rows_count = None
if (
not db.is_mutable
and self.ds.inspect_data
and count_sql == f"select count(*) from {table} "
):
try:
filtered_table_rows_count = self.ds.inspect_data[database]["tables"][table][
"count"
]
except KeyError:
pass
if count_sql and filtered_table_rows_count is None:
try:
count_rows = list(await db.execute(count_sql, from_sql_params))
filtered_table_rows_count = count_rows[0][0]
except QueryInterrupted:
pass
# facets support
if not self.ds.setting("allow_facet") and any(
arg.startswith("_facet") for arg in request.args
):
raise BadRequest("_facet= is not allowed")
# pylint: disable=no-member
facet_classes = list(
itertools.chain.from_iterable(pm.hook.register_facet_classes())
)
facet_results = {}
facets_timed_out = []
facet_instances = []
for klass in facet_classes:
facet_instances.append(
klass(
self.ds,
request,
database,
sql=sql_no_limit,
params=params,
table=table,
metadata=table_metadata,
row_count=filtered_table_rows_count,
)
)
for facet in facet_instances:
(
instance_facet_results,
instance_facets_timed_out,
) = await facet.facet_results()
facet_results.update(instance_facet_results)
facets_timed_out.extend(instance_facets_timed_out)
# Figure out columns and rows for the query
columns = [r[0] for r in results.description]
rows = list(results.rows)
# Expand labeled columns if requested
expanded_columns = []
expandable_columns = await self.expandable_columns(database, table)
columns_to_expand = None
try:
all_labels = value_as_boolean(special_args.get("_labels", ""))
except ValueError:
all_labels = default_labels
# Check for explicit _label=
if "_label" in request.args:
columns_to_expand = request.args.getlist("_label")
if columns_to_expand is None and all_labels:
# expand all columns with foreign keys
columns_to_expand = [fk["column"] for fk, _ in expandable_columns]
if columns_to_expand:
expanded_labels = {}
for fk, _ in expandable_columns:
column = fk["column"]
if column not in columns_to_expand:
continue
expanded_columns.append(column)
# Gather the values
column_index = columns.index(column)
values = [row[column_index] for row in rows]
# Expand them
expanded_labels.update(
await self.ds.expand_foreign_keys(database, table, column, values)
)
if expanded_labels:
# Rewrite the rows
new_rows = []
for row in rows:
new_row = CustomRow(columns)
for column in row.keys():
value = row[column]
if (column, value) in expanded_labels and value is not None:
new_row[column] = {
"value": value,
"label": expanded_labels[(column, value)],
}
else:
new_row[column] = value
new_rows.append(new_row)
rows = new_rows
# Pagination next link
next_value = None
next_url = None
if len(rows) > page_size and page_size > 0:
if is_view:
next_value = int(_next or 0) + page_size
else:
next_value = path_from_row_pks(rows[-2], pks, use_rowid)
# If there's a sort or sort_desc, add that value as a prefix
if (sort or sort_desc) and not is_view:
prefix = rows[-2][sort or sort_desc]
if isinstance(prefix, dict) and "value" in prefix:
prefix = prefix["value"]
if prefix is None:
prefix = "$null"
else:
prefix = urllib.parse.quote_plus(str(prefix))
next_value = f"{prefix},{next_value}"
added_args = {"_next": next_value}
if sort:
added_args["_sort"] = sort
else:
added_args["_sort_desc"] = sort_desc
else:
added_args = {"_next": next_value}
next_url = self.ds.absolute_url(
request, path_with_replaced_args(request, added_args)
)
rows = rows[:page_size]
# Detect suggested facets
suggested_facets = []
if (
self.ds.setting("suggest_facets")
and self.ds.setting("allow_facet")
and not _next
):
for facet in facet_instances:
suggested_facets.extend(await facet.suggest())
# human_description_en combines filters AND search, if provided
human_description_en = filters.human_description_en(extra=extra_human_descriptions)
if sort or sort_desc:
sorted_by = "sorted by {}{}".format(
(sort or sort_desc), " descending" if sort_desc else ""
)
human_description_en = " ".join(
[b for b in [human_description_en, sorted_by] if b]
)
async def extra_template():
nonlocal sort
display_columns, display_rows = await self.display_columns_and_rows(
database,
table,
results.description,
rows,
link_column=not is_view,
truncate_cells=self.ds.setting("truncate_cells_html"),
)
metadata = (
(self.ds.metadata("databases") or {})
.get(database, {})
.get("tables", {})
.get(table, {})
)
self.ds.update_with_inherited_metadata(metadata)
form_hidden_args = []
# Add currently selected facets
for arg in special_args:
if arg == "_facet" or arg.startswith("_facet_"):
form_hidden_args.extend(
(arg, item) for item in request.args.getlist(arg)
)
for arg in ("_fts_table", "_fts_pk"):
if arg in special_args:
form_hidden_args.append((arg, special_args[arg]))
if request.args.get("_where"):
for where_text in request.args.getlist("_where"):
form_hidden_args.append(("_where", where_text))
# if no sort specified AND table has a single primary key,
# set sort to that so arrow is displayed
if not sort and not sort_desc:
if 1 == len(pks):
sort = pks[0]
elif use_rowid:
sort = "rowid"
async def table_actions():
links = []
for hook in pm.hook.table_actions(
datasette=self.ds,
table=table,
database=database,
actor=request.actor,
):
extra_links = await await_me_maybe(hook)
if extra_links:
links.extend(extra_links)
return links
return {
"table_actions": table_actions,
"supports_search": bool(fts_table),
"search": search or "",
"use_rowid": use_rowid,
"filters": filters,
"display_columns": display_columns,
"filter_columns": columns,
"display_rows": display_rows,
"facets_timed_out": facets_timed_out,
"sorted_facet_results": sorted(
facet_results.values(),
key=lambda f: (len(f["results"]), f["name"]),
reverse=True,
),
"extra_wheres_for_ui": extra_wheres_for_ui,
"form_hidden_args": form_hidden_args,
"is_sortable": any(c["sortable"] for c in display_columns),
"path_with_replaced_args": path_with_replaced_args,
"path_with_removed_args": path_with_removed_args,
"append_querystring": append_querystring,
"request": request,
"sort": sort,
"sort_desc": sort_desc,
"disable_sort": is_view,
"custom_table_templates": [
f"_table-{to_css_class(database)}-{to_css_class(table)}.html",
f"_table-table-{to_css_class(database)}-{to_css_class(table)}.html",
"_table.html",
],
"metadata": metadata,
"view_definition": await db.get_view_definition(table),
"table_definition": await db.get_table_definition(table),
}
return (
{
"database": database,
"table": table,
"is_view": is_view,
"human_description_en": human_description_en,
"rows": rows[:page_size],
"truncated": results.truncated,
"filtered_table_rows_count": filtered_table_rows_count,
"expanded_columns": expanded_columns,
"expandable_columns": expandable_columns,
"columns": columns,
"primary_keys": pks,
"units": units,
"query": {"sql": sql, "params": params},
"facet_results": facet_results,
"suggested_facets": suggested_facets,
"next": next_value and str(next_value) or None,
"next_url": next_url,
"private": private,
"allow_execute_sql": await self.ds.permission_allowed(
request.actor, "execute-sql", database, default=True
),
},
extra_template,
(
f"table-{to_css_class(database)}-{to_css_class(table)}.html",
"table.html",
),
)
|
async def data(
self,
request,
database,
hash,
table,
default_labels=False,
_next=None,
_size=None,
):
canned_query = await self.ds.get_canned_query(database, table, request.actor)
if canned_query:
return await QueryView(self.ds).data(
request,
database,
hash,
canned_query["sql"],
metadata=canned_query,
editable=False,
canned_query=table,
named_parameters=canned_query.get("params"),
write=bool(canned_query.get("write")),
)
db = self.ds.databases[database]
is_view = bool(await db.get_view_definition(table))
table_exists = bool(await db.table_exists(table))
if not is_view and not table_exists:
raise NotFound(f"Table not found: {table}")
await self.check_permissions(
request,
[
("view-table", (database, table)),
("view-database", database),
"view-instance",
],
)
private = not await self.ds.permission_allowed(
None, "view-table", (database, table), default=True
)
pks = await db.primary_keys(table)
table_column_details = await db.table_column_details(table)
table_columns = [column.name for column in table_column_details]
select_columns = ", ".join(escape_sqlite(t) for t in table_columns)
use_rowid = not pks and not is_view
if use_rowid:
select = f"rowid, {select_columns}"
order_by = "rowid"
order_by_pks = "rowid"
else:
select = select_columns
order_by_pks = ", ".join([escape_sqlite(pk) for pk in pks])
order_by = order_by_pks
if is_view:
order_by = ""
# Ensure we don't drop anything with an empty value e.g. ?name__exact=
args = MultiParams(
urllib.parse.parse_qs(request.query_string, keep_blank_values=True)
)
# Special args start with _ and do not contain a __
# That's so if there is a column that starts with _
# it can still be queried using ?_col__exact=blah
special_args = {}
other_args = []
for key in args:
if key.startswith("_") and "__" not in key:
special_args[key] = args[key]
else:
for v in args.getlist(key):
other_args.append((key, v))
# Handle ?_filter_column and redirect, if present
redirect_params = filters_should_redirect(special_args)
if redirect_params:
return self.redirect(
request,
path_with_added_args(request, redirect_params),
forward_querystring=False,
)
# If ?_sort_by_desc=on (from checkbox) redirect to _sort_desc=(_sort)
if "_sort_by_desc" in special_args:
return self.redirect(
request,
path_with_added_args(
request,
{
"_sort_desc": special_args.get("_sort"),
"_sort_by_desc": None,
"_sort": None,
},
),
forward_querystring=False,
)
table_metadata = self.ds.table_metadata(database, table)
units = table_metadata.get("units", {})
filters = Filters(sorted(other_args), units, ureg)
where_clauses, params = filters.build_where_clauses(table)
extra_wheres_for_ui = []
# Add _where= from querystring
if "_where" in request.args:
if not await self.ds.permission_allowed(
request.actor,
"execute-sql",
resource=database,
default=True,
):
raise DatasetteError("_where= is not allowed", status=403)
else:
where_clauses.extend(request.args.getlist("_where"))
extra_wheres_for_ui = [
{
"text": text,
"remove_url": path_with_removed_args(request, {"_where": text}),
}
for text in request.args.getlist("_where")
]
# Support for ?_through={table, column, value}
extra_human_descriptions = []
if "_through" in request.args:
for through in request.args.getlist("_through"):
through_data = json.loads(through)
through_table = through_data["table"]
other_column = through_data["column"]
value = through_data["value"]
outgoing_foreign_keys = await db.foreign_keys_for_table(through_table)
try:
fk_to_us = [
fk for fk in outgoing_foreign_keys if fk["other_table"] == table
][0]
except IndexError:
raise DatasetteError(
"Invalid _through - could not find corresponding foreign key"
)
param = f"p{len(params)}"
where_clauses.append(
"{our_pk} in (select {our_column} from {through_table} where {other_column} = :{param})".format(
through_table=escape_sqlite(through_table),
our_pk=escape_sqlite(fk_to_us["other_column"]),
our_column=escape_sqlite(fk_to_us["column"]),
other_column=escape_sqlite(other_column),
param=param,
)
)
params[param] = value
extra_human_descriptions.append(
f'{through_table}.{other_column} = "{value}"'
)
# _search support:
fts_table = special_args.get("_fts_table")
fts_table = fts_table or table_metadata.get("fts_table")
fts_table = fts_table or await db.fts_table(table)
fts_pk = special_args.get("_fts_pk", table_metadata.get("fts_pk", "rowid"))
search_args = dict(
pair for pair in special_args.items() if pair[0].startswith("_search")
)
search = ""
search_mode_raw = special_args.get("_searchmode") == "raw"
if fts_table and search_args:
if "_search" in search_args:
# Simple ?_search=xxx
search = search_args["_search"]
where_clauses.append(
"{fts_pk} in (select rowid from {fts_table} where {fts_table} match {match_clause})".format(
fts_table=escape_sqlite(fts_table),
fts_pk=escape_sqlite(fts_pk),
match_clause=":search"
if search_mode_raw
else "escape_fts(:search)",
)
)
extra_human_descriptions.append(f'search matches "{search}"')
params["search"] = search
else:
# More complex: search against specific columns
for i, (key, search_text) in enumerate(search_args.items()):
search_col = key.split("_search_", 1)[1]
if search_col not in await db.table_columns(fts_table):
raise BadRequest("Cannot search by that column")
where_clauses.append(
"rowid in (select rowid from {fts_table} where {search_col} match {match_clause})".format(
fts_table=escape_sqlite(fts_table),
search_col=escape_sqlite(search_col),
match_clause=":search_{}".format(i)
if search_mode_raw
else "escape_fts(:search_{})".format(i),
)
)
extra_human_descriptions.append(
f'search column "{search_col}" matches "{search_text}"'
)
params[f"search_{i}"] = search_text
sortable_columns = set()
sortable_columns = await self.sortable_columns_for_table(database, table, use_rowid)
# Allow for custom sort order
sort = special_args.get("_sort")
sort_desc = special_args.get("_sort_desc")
if not sort and not sort_desc:
sort = table_metadata.get("sort")
sort_desc = table_metadata.get("sort_desc")
if sort and sort_desc:
raise DatasetteError("Cannot use _sort and _sort_desc at the same time")
if sort:
if sort not in sortable_columns:
raise DatasetteError(f"Cannot sort table by {sort}")
order_by = escape_sqlite(sort)
if sort_desc:
if sort_desc not in sortable_columns:
raise DatasetteError(f"Cannot sort table by {sort_desc}")
order_by = f"{escape_sqlite(sort_desc)} desc"
from_sql = "from {table_name} {where}".format(
table_name=escape_sqlite(table),
where=("where {} ".format(" and ".join(where_clauses)))
if where_clauses
else "",
)
# Copy of params so we can mutate them later:
from_sql_params = dict(**params)
count_sql = f"select count(*) {from_sql}"
_next = _next or special_args.get("_next")
offset = ""
if _next:
if is_view:
# _next is an offset
offset = f" offset {int(_next)}"
else:
components = urlsafe_components(_next)
# If a sort order is applied, the first of these is the sort value
if sort or sort_desc:
sort_value = components[0]
# Special case for if non-urlencoded first token was $null
if _next.split(",")[0] == "$null":
sort_value = None
components = components[1:]
# Figure out the SQL for next-based-on-primary-key first
next_by_pk_clauses = []
if use_rowid:
next_by_pk_clauses.append(f"rowid > :p{len(params)}")
params[f"p{len(params)}"] = components[0]
else:
# Apply the tie-breaker based on primary keys
if len(components) == len(pks):
param_len = len(params)
next_by_pk_clauses.append(compound_keys_after_sql(pks, param_len))
for i, pk_value in enumerate(components):
params[f"p{param_len + i}"] = pk_value
# Now add the sort SQL, which may incorporate next_by_pk_clauses
if sort or sort_desc:
if sort_value is None:
if sort_desc:
# Just items where column is null ordered by pk
where_clauses.append(
"({column} is null and {next_clauses})".format(
column=escape_sqlite(sort_desc),
next_clauses=" and ".join(next_by_pk_clauses),
)
)
else:
where_clauses.append(
"({column} is not null or ({column} is null and {next_clauses}))".format(
column=escape_sqlite(sort),
next_clauses=" and ".join(next_by_pk_clauses),
)
)
else:
where_clauses.append(
"({column} {op} :p{p}{extra_desc_only} or ({column} = :p{p} and {next_clauses}))".format(
column=escape_sqlite(sort or sort_desc),
op=">" if sort else "<",
p=len(params),
extra_desc_only=""
if sort
else " or {column2} is null".format(
column2=escape_sqlite(sort or sort_desc)
),
next_clauses=" and ".join(next_by_pk_clauses),
)
)
params[f"p{len(params)}"] = sort_value
order_by = f"{order_by}, {order_by_pks}"
else:
where_clauses.extend(next_by_pk_clauses)
where_clause = ""
if where_clauses:
where_clause = f"where {' and '.join(where_clauses)} "
if order_by:
order_by = f"order by {order_by} "
extra_args = {}
# Handle ?_size=500
page_size = _size or request.args.get("_size") or table_metadata.get("size")
if page_size:
if page_size == "max":
page_size = self.ds.max_returned_rows
try:
page_size = int(page_size)
if page_size < 0:
raise ValueError
except ValueError:
raise BadRequest("_size must be a positive integer")
if page_size > self.ds.max_returned_rows:
raise BadRequest(f"_size must be <= {self.ds.max_returned_rows}")
extra_args["page_size"] = page_size
else:
page_size = self.ds.page_size
sql_no_limit = "select {select} from {table_name} {where}{order_by}".format(
select=select,
table_name=escape_sqlite(table),
where=where_clause,
order_by=order_by,
)
sql = f"{sql_no_limit.rstrip()} limit {page_size + 1}{offset}"
if request.args.get("_timelimit"):
extra_args["custom_time_limit"] = int(request.args.get("_timelimit"))
results = await db.execute(sql, params, truncate=True, **extra_args)
# Number of filtered rows in whole set:
filtered_table_rows_count = None
if (
not db.is_mutable
and self.ds.inspect_data
and count_sql == f"select count(*) from {table} "
):
try:
filtered_table_rows_count = self.ds.inspect_data[database]["tables"][table][
"count"
]
except KeyError:
pass
if count_sql and filtered_table_rows_count is None:
try:
count_rows = list(await db.execute(count_sql, from_sql_params))
filtered_table_rows_count = count_rows[0][0]
except QueryInterrupted:
pass
# facets support
if not self.ds.setting("allow_facet") and any(
arg.startswith("_facet") for arg in request.args
):
raise BadRequest("_facet= is not allowed")
# pylint: disable=no-member
facet_classes = list(
itertools.chain.from_iterable(pm.hook.register_facet_classes())
)
facet_results = {}
facets_timed_out = []
facet_instances = []
for klass in facet_classes:
facet_instances.append(
klass(
self.ds,
request,
database,
sql=sql_no_limit,
params=params,
table=table,
metadata=table_metadata,
row_count=filtered_table_rows_count,
)
)
for facet in facet_instances:
(
instance_facet_results,
instance_facets_timed_out,
) = await facet.facet_results()
facet_results.update(instance_facet_results)
facets_timed_out.extend(instance_facets_timed_out)
# Figure out columns and rows for the query
columns = [r[0] for r in results.description]
rows = list(results.rows)
# Expand labeled columns if requested
expanded_columns = []
expandable_columns = await self.expandable_columns(database, table)
columns_to_expand = None
try:
all_labels = value_as_boolean(special_args.get("_labels", ""))
except ValueError:
all_labels = default_labels
# Check for explicit _label=
if "_label" in request.args:
columns_to_expand = request.args.getlist("_label")
if columns_to_expand is None and all_labels:
# expand all columns with foreign keys
columns_to_expand = [fk["column"] for fk, _ in expandable_columns]
if columns_to_expand:
expanded_labels = {}
for fk, _ in expandable_columns:
column = fk["column"]
if column not in columns_to_expand:
continue
expanded_columns.append(column)
# Gather the values
column_index = columns.index(column)
values = [row[column_index] for row in rows]
# Expand them
expanded_labels.update(
await self.ds.expand_foreign_keys(database, table, column, values)
)
if expanded_labels:
# Rewrite the rows
new_rows = []
for row in rows:
new_row = CustomRow(columns)
for column in row.keys():
value = row[column]
if (column, value) in expanded_labels and value is not None:
new_row[column] = {
"value": value,
"label": expanded_labels[(column, value)],
}
else:
new_row[column] = value
new_rows.append(new_row)
rows = new_rows
# Pagination next link
next_value = None
next_url = None
if len(rows) > page_size and page_size > 0:
if is_view:
next_value = int(_next or 0) + page_size
else:
next_value = path_from_row_pks(rows[-2], pks, use_rowid)
# If there's a sort or sort_desc, add that value as a prefix
if (sort or sort_desc) and not is_view:
prefix = rows[-2][sort or sort_desc]
if isinstance(prefix, dict) and "value" in prefix:
prefix = prefix["value"]
if prefix is None:
prefix = "$null"
else:
prefix = urllib.parse.quote_plus(str(prefix))
next_value = f"{prefix},{next_value}"
added_args = {"_next": next_value}
if sort:
added_args["_sort"] = sort
else:
added_args["_sort_desc"] = sort_desc
else:
added_args = {"_next": next_value}
next_url = self.ds.absolute_url(
request, path_with_replaced_args(request, added_args)
)
rows = rows[:page_size]
# Detect suggested facets
suggested_facets = []
if (
self.ds.setting("suggest_facets")
and self.ds.setting("allow_facet")
and not _next
):
for facet in facet_instances:
suggested_facets.extend(await facet.suggest())
# human_description_en combines filters AND search, if provided
human_description_en = filters.human_description_en(extra=extra_human_descriptions)
if sort or sort_desc:
sorted_by = "sorted by {}{}".format(
(sort or sort_desc), " descending" if sort_desc else ""
)
human_description_en = " ".join(
[b for b in [human_description_en, sorted_by] if b]
)
async def extra_template():
nonlocal sort
display_columns, display_rows = await self.display_columns_and_rows(
database,
table,
results.description,
rows,
link_column=not is_view,
truncate_cells=self.ds.setting("truncate_cells_html"),
)
metadata = (
(self.ds.metadata("databases") or {})
.get(database, {})
.get("tables", {})
.get(table, {})
)
self.ds.update_with_inherited_metadata(metadata)
form_hidden_args = []
# Add currently selected facets
for arg in special_args:
if arg == "_facet" or arg.startswith("_facet_"):
form_hidden_args.extend(
(arg, item) for item in request.args.getlist(arg)
)
for arg in ("_fts_table", "_fts_pk"):
if arg in special_args:
form_hidden_args.append((arg, special_args[arg]))
if request.args.get("_where"):
for where_text in request.args.getlist("_where"):
form_hidden_args.append(("_where", where_text))
# if no sort specified AND table has a single primary key,
# set sort to that so arrow is displayed
if not sort and not sort_desc:
if 1 == len(pks):
sort = pks[0]
elif use_rowid:
sort = "rowid"
async def table_actions():
links = []
for hook in pm.hook.table_actions(
datasette=self.ds,
table=table,
database=database,
actor=request.actor,
):
extra_links = await await_me_maybe(hook)
if extra_links:
links.extend(extra_links)
return links
return {
"table_actions": table_actions,
"supports_search": bool(fts_table),
"search": search or "",
"use_rowid": use_rowid,
"filters": filters,
"display_columns": display_columns,
"filter_columns": columns,
"display_rows": display_rows,
"facets_timed_out": facets_timed_out,
"sorted_facet_results": sorted(
facet_results.values(),
key=lambda f: (len(f["results"]), f["name"]),
reverse=True,
),
"extra_wheres_for_ui": extra_wheres_for_ui,
"form_hidden_args": form_hidden_args,
"is_sortable": any(c["sortable"] for c in display_columns),
"path_with_replaced_args": path_with_replaced_args,
"path_with_removed_args": path_with_removed_args,
"append_querystring": append_querystring,
"request": request,
"sort": sort,
"sort_desc": sort_desc,
"disable_sort": is_view,
"custom_table_templates": [
f"_table-{to_css_class(database)}-{to_css_class(table)}.html",
f"_table-table-{to_css_class(database)}-{to_css_class(table)}.html",
"_table.html",
],
"metadata": metadata,
"view_definition": await db.get_view_definition(table),
"table_definition": await db.get_table_definition(table),
}
return (
{
"database": database,
"table": table,
"is_view": is_view,
"human_description_en": human_description_en,
"rows": rows[:page_size],
"truncated": results.truncated,
"filtered_table_rows_count": filtered_table_rows_count,
"expanded_columns": expanded_columns,
"expandable_columns": expandable_columns,
"columns": columns,
"primary_keys": pks,
"units": units,
"query": {"sql": sql, "params": params},
"facet_results": facet_results,
"suggested_facets": suggested_facets,
"next": next_value and str(next_value) or None,
"next_url": next_url,
"private": private,
"allow_execute_sql": await self.ds.permission_allowed(
request.actor, "execute-sql", database, default=True
),
},
extra_template,
(
f"table-{to_css_class(database)}-{to_css_class(table)}.html",
"table.html",
),
)
|
https://github.com/simonw/datasette/issues/1134
|
Traceback (most recent call last):
File "/Users/cjk/.local/share/virtualenvs/minutes-jMDZ8Ssk/lib/python3.7/site-packages/datasette/utils/asgi.py", line 122, in route_path
return await view(new_scope, receive, send)
File "/Users/cjk/.local/share/virtualenvs/minutes-jMDZ8Ssk/lib/python3.7/site-packages/datasette/utils/asgi.py", line 196, in view
request, **scope["url_route"]["kwargs"]
File "/Users/cjk/.local/share/virtualenvs/minutes-jMDZ8Ssk/lib/python3.7/site-packages/datasette/views/base.py", line 204, in get
request, database, hash, correct_hash_provided, **kwargs
File "/Users/cjk/.local/share/virtualenvs/minutes-jMDZ8Ssk/lib/python3.7/site-packages/datasette/views/base.py", line 342, in view_get
request, database, hash, **kwargs
File "/Users/cjk/.local/share/virtualenvs/minutes-jMDZ8Ssk/lib/python3.7/site-packages/datasette/views/table.py", line 393, in data
search_col = key.split("_search_", 1)[1]
IndexError: list index out of range
|
IndexError
|
async def options(self, request, *args, **kwargs):
r = Response.text("ok")
if self.ds.cors:
r.headers["Access-Control-Allow-Origin"] = "*"
return r
|
def options(self, request, *args, **kwargs):
r = Response.text("ok")
if self.ds.cors:
r.headers["Access-Control-Allow-Origin"] = "*"
return r
|
https://github.com/simonw/datasette/issues/1100
|
Traceback (most recent call last):
File "[path-to-python]/site-packages/datasette/app.py", line 1033, in route_path
response = await view(request, send)
File "[path-to-python]/site-packages/datasette/views/base.py", line 146, in view
request, **request.scope["url_route"]["kwargs"]
File "[path-to-python]/site-packages/datasette/views/base.py", line 118, in dispatch_request
return await handler(request, *args, **kwargs)
TypeError: object Response can't be used in 'await' expression
|
TypeError
|
def link_or_copy_directory(src, dst):
try:
copytree(src, dst, copy_function=os.link, dirs_exist_ok=True)
except OSError:
copytree(src, dst, dirs_exist_ok=True)
|
def link_or_copy_directory(src, dst):
try:
shutil.copytree(src, dst, copy_function=os.link, dirs_exist_ok=True)
except OSError:
shutil.copytree(src, dst, dirs_exist_ok=True)
|
https://github.com/simonw/datasette/issues/744
|
Traceback (most recent call last):
File "/home/aborruso/.local/lib/python3.7/site-packages/datasette/utils/__init__.py", line 607, in link_or_copy_directory
shutil.copytree(src, dst, copy_function=os.link)
File "/usr/lib/python3.7/shutil.py", line 365, in copytree
raise Error(errors)
shutil.Error: [('/myfolder/youtubeComunePalermo/processing/./template/base.html', '/tmp/tmps9_4mzc4/templates/base.html', "[Errno 18] Invalid cross-device link: '/myfolder/youtubeComunePalermo/processing/./template/base.html' -> '/tmp/tmps9_4mzc4/templates/base.html'"), ('/myfolder/youtubeComunePalermo/processing/./template/index.html', '/tmp/tmps9_4mzc4/templates/index.html', "[Errno 18] Invalid cross-device link: '/myfolder/youtubeComunePalermo/processing/./template/index.html' -> '/tmp/tmps9_4mzc4/templates/index.html'")]
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/aborruso/.local/bin/datasette", line 8, in <module>
sys.exit(cli())
File "/home/aborruso/.local/lib/python3.7/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/aborruso/.local/lib/python3.7/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/aborruso/.local/lib/python3.7/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/aborruso/.local/lib/python3.7/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/aborruso/.local/lib/python3.7/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/aborruso/.local/lib/python3.7/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/aborruso/.local/lib/python3.7/site-packages/datasette/publish/heroku.py", line 103, in heroku
extra_metadata,
File "/usr/lib/python3.7/contextlib.py", line 112, in __enter__
return next(self.gen)
File "/home/aborruso/.local/lib/python3.7/site-packages/datasette/publish/heroku.py", line 191, in temporary_heroku_directory
os.path.join(tmp.name, "templates"),
File "/home/aborruso/.local/lib/python3.7/site-packages/datasette/utils/__init__.py", line 609, in link_or_copy_directory
shutil.copytree(src, dst)
File "/usr/lib/python3.7/shutil.py", line 321, in copytree
os.makedirs(dst)
File "/usr/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
FileExistsError: [Errno 17] File exists: '/tmp/tmps9_4mzc4/templates'
|
shutil.Error
|
def link_or_copy_directory(src, dst):
try:
shutil.copytree(src, dst, copy_function=os.link, dirs_exist_ok=True)
except OSError:
shutil.copytree(src, dst, dirs_exist_ok=True)
|
def link_or_copy_directory(src, dst):
try:
shutil.copytree(src, dst, copy_function=os.link)
except OSError:
shutil.copytree(src, dst)
|
https://github.com/simonw/datasette/issues/744
|
Traceback (most recent call last):
File "/home/aborruso/.local/lib/python3.7/site-packages/datasette/utils/__init__.py", line 607, in link_or_copy_directory
shutil.copytree(src, dst, copy_function=os.link)
File "/usr/lib/python3.7/shutil.py", line 365, in copytree
raise Error(errors)
shutil.Error: [('/myfolder/youtubeComunePalermo/processing/./template/base.html', '/tmp/tmps9_4mzc4/templates/base.html', "[Errno 18] Invalid cross-device link: '/myfolder/youtubeComunePalermo/processing/./template/base.html' -> '/tmp/tmps9_4mzc4/templates/base.html'"), ('/myfolder/youtubeComunePalermo/processing/./template/index.html', '/tmp/tmps9_4mzc4/templates/index.html', "[Errno 18] Invalid cross-device link: '/myfolder/youtubeComunePalermo/processing/./template/index.html' -> '/tmp/tmps9_4mzc4/templates/index.html'")]
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/aborruso/.local/bin/datasette", line 8, in <module>
sys.exit(cli())
File "/home/aborruso/.local/lib/python3.7/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/aborruso/.local/lib/python3.7/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/aborruso/.local/lib/python3.7/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/aborruso/.local/lib/python3.7/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/aborruso/.local/lib/python3.7/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/aborruso/.local/lib/python3.7/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/aborruso/.local/lib/python3.7/site-packages/datasette/publish/heroku.py", line 103, in heroku
extra_metadata,
File "/usr/lib/python3.7/contextlib.py", line 112, in __enter__
return next(self.gen)
File "/home/aborruso/.local/lib/python3.7/site-packages/datasette/publish/heroku.py", line 191, in temporary_heroku_directory
os.path.join(tmp.name, "templates"),
File "/home/aborruso/.local/lib/python3.7/site-packages/datasette/utils/__init__.py", line 609, in link_or_copy_directory
shutil.copytree(src, dst)
File "/usr/lib/python3.7/shutil.py", line 321, in copytree
os.makedirs(dst)
File "/usr/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
FileExistsError: [Errno 17] File exists: '/tmp/tmps9_4mzc4/templates'
|
shutil.Error
|
def link_or_copy(src, dst):
# Intended for use in populating a temp directory. We link if possible,
# but fall back to copying if the temp directory is on a different device
# https://github.com/simonw/datasette/issues/141
try:
os.link(src, dst)
except shutil.Error:
shutil.copyfile(src, dst)
|
def link_or_copy(src, dst):
# Intended for use in populating a temp directory. We link if possible,
# but fall back to copying if the temp directory is on a different device
# https://github.com/simonw/datasette/issues/141
try:
os.link(src, dst)
except OSError:
shutil.copyfile(src, dst)
|
https://github.com/simonw/datasette/issues/744
|
Traceback (most recent call last):
File "/home/aborruso/.local/lib/python3.7/site-packages/datasette/utils/__init__.py", line 607, in link_or_copy_directory
shutil.copytree(src, dst, copy_function=os.link)
File "/usr/lib/python3.7/shutil.py", line 365, in copytree
raise Error(errors)
shutil.Error: [('/myfolder/youtubeComunePalermo/processing/./template/base.html', '/tmp/tmps9_4mzc4/templates/base.html', "[Errno 18] Invalid cross-device link: '/myfolder/youtubeComunePalermo/processing/./template/base.html' -> '/tmp/tmps9_4mzc4/templates/base.html'"), ('/myfolder/youtubeComunePalermo/processing/./template/index.html', '/tmp/tmps9_4mzc4/templates/index.html', "[Errno 18] Invalid cross-device link: '/myfolder/youtubeComunePalermo/processing/./template/index.html' -> '/tmp/tmps9_4mzc4/templates/index.html'")]
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/aborruso/.local/bin/datasette", line 8, in <module>
sys.exit(cli())
File "/home/aborruso/.local/lib/python3.7/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/aborruso/.local/lib/python3.7/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/aborruso/.local/lib/python3.7/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/aborruso/.local/lib/python3.7/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/aborruso/.local/lib/python3.7/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/aborruso/.local/lib/python3.7/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/aborruso/.local/lib/python3.7/site-packages/datasette/publish/heroku.py", line 103, in heroku
extra_metadata,
File "/usr/lib/python3.7/contextlib.py", line 112, in __enter__
return next(self.gen)
File "/home/aborruso/.local/lib/python3.7/site-packages/datasette/publish/heroku.py", line 191, in temporary_heroku_directory
os.path.join(tmp.name, "templates"),
File "/home/aborruso/.local/lib/python3.7/site-packages/datasette/utils/__init__.py", line 609, in link_or_copy_directory
shutil.copytree(src, dst)
File "/usr/lib/python3.7/shutil.py", line 321, in copytree
os.makedirs(dst)
File "/usr/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
FileExistsError: [Errno 17] File exists: '/tmp/tmps9_4mzc4/templates'
|
shutil.Error
|
def path(self):
if "raw_path" in self.scope:
return self.scope["raw_path"].decode("latin-1")
else:
return self.scope["path"].decode("utf-8")
|
def path(self):
return (self.scope.get("raw_path", self.scope["path"].encode("latin-1"))).decode(
"latin-1"
)
|
https://github.com/simonw/datasette/issues/558
|
Traceback (most recent call last):
File "/home/zhe/miniconda3/lib/python3.7/site-packages/datasette/utils/asgi.py", line 100, in __call__
return await view(new_scope, receive, send)
File "/home/zhe/miniconda3/lib/python3.7/site-packages/datasette/utils/asgi.py", line 172, in view
request, **scope["url_route"]["kwargs"]
File "/home/zhe/miniconda3/lib/python3.7/site-packages/datasette/views/base.py", line 267, in get
request, database, hash, correct_hash_provided, **kwargs
File "/home/zhe/miniconda3/lib/python3.7/site-packages/datasette/views/base.py", line 471, in view_get
for key in self.ds.renderers.keys()
File "/home/zhe/miniconda3/lib/python3.7/site-packages/datasette/views/base.py", line 471, in <dictcomp>
for key in self.ds.renderers.keys()
File "/home/zhe/miniconda3/lib/python3.7/site-packages/datasette/utils/__init__.py", line 655, in path_with_format
path = request.path
File "/home/zhe/miniconda3/lib/python3.7/site-packages/datasette/utils/asgi.py", line 49, in path
self.scope.get("raw_path", self.scope["path"].encode("latin-1"))
UnicodeEncodeError: 'latin-1' codec can't encode characters in position 9-11: ordinal not in range(256)
|
UnicodeEncodeError
|
def generate_new_attacked_text(self, new_words):
"""Returns a new AttackedText object and replaces old list of words
with a new list of words, but preserves the punctuation and spacing of
the original message.
``self.words`` is a list of the words in the current text with
punctuation removed. However, each "word" in ``new_words`` could
be an empty string, representing a word deletion, or a string
with multiple space-separated words, representation an insertion
of one or more words.
"""
perturbed_text = ""
original_text = AttackedText.SPLIT_TOKEN.join(self._text_input.values())
new_attack_attrs = dict()
if "label_names" in self.attack_attrs:
new_attack_attrs["label_names"] = self.attack_attrs["label_names"]
new_attack_attrs["newly_modified_indices"] = set()
# Point to previously monitored text.
new_attack_attrs["previous_attacked_text"] = self
# Use `new_attack_attrs` to track indices with respect to the original
# text.
new_attack_attrs["modified_indices"] = self.attack_attrs["modified_indices"].copy()
new_attack_attrs["original_index_map"] = self.attack_attrs[
"original_index_map"
].copy()
new_i = 0
# Create the new attacked text by swapping out words from the original
# text with a sequence of 0+ words in the new text.
for i, (input_word, adv_word_seq) in enumerate(zip(self.words, new_words)):
word_start = original_text.index(input_word)
word_end = word_start + len(input_word)
perturbed_text += original_text[:word_start]
original_text = original_text[word_end:]
adv_num_words = len(words_from_text(adv_word_seq))
num_words_diff = adv_num_words - len(words_from_text(input_word))
# Track indices on insertions and deletions.
if num_words_diff != 0:
# Re-calculated modified indices. If words are inserted or deleted,
# they could change.
shifted_modified_indices = set()
for modified_idx in new_attack_attrs["modified_indices"]:
if modified_idx < i:
shifted_modified_indices.add(modified_idx)
elif modified_idx > i:
shifted_modified_indices.add(modified_idx + num_words_diff)
else:
pass
new_attack_attrs["modified_indices"] = shifted_modified_indices
# Track insertions and deletions wrt original text.
# original_modification_idx = i
new_idx_map = new_attack_attrs["original_index_map"].copy()
if num_words_diff == -1:
new_idx_map[new_idx_map == i] = -1
new_idx_map[new_idx_map > i] += num_words_diff
new_attack_attrs["original_index_map"] = new_idx_map
# Move pointer and save indices of new modified words.
for j in range(i, i + adv_num_words):
if input_word != adv_word_seq:
new_attack_attrs["modified_indices"].add(new_i)
new_attack_attrs["newly_modified_indices"].add(new_i)
new_i += 1
# Check spaces for deleted text.
if adv_num_words == 0 and len(original_text):
# Remove extra space (or else there would be two spaces for each
# deleted word).
# @TODO What to do with punctuation in this case? This behavior is undefined.
if i == 0:
# If the first word was deleted, take a subsequent space.
if original_text[0] == " ":
original_text = original_text[1:]
else:
# If a word other than the first was deleted, take a preceding space.
if perturbed_text[-1] == " ":
perturbed_text = perturbed_text[:-1]
# Add substitute word(s) to new sentence.
perturbed_text += adv_word_seq
perturbed_text += original_text # Add all of the ending punctuation.
# Reform perturbed_text into an OrderedDict.
perturbed_input_texts = perturbed_text.split(AttackedText.SPLIT_TOKEN)
perturbed_input = OrderedDict(zip(self._text_input.keys(), perturbed_input_texts))
return AttackedText(perturbed_input, attack_attrs=new_attack_attrs)
|
def generate_new_attacked_text(self, new_words):
"""Returns a new AttackedText object and replaces old list of words
with a new list of words, but preserves the punctuation and spacing of
the original message.
``self.words`` is a list of the words in the current text with
punctuation removed. However, each "word" in ``new_words`` could
be an empty string, representing a word deletion, or a string
with multiple space-separated words, representation an insertion
of one or more words.
"""
perturbed_text = ""
original_text = AttackedText.SPLIT_TOKEN.join(self._text_input.values())
new_attack_attrs = dict()
if "label_names" in self.attack_attrs:
new_attack_attrs["label_names"] = self.attack_attrs["label_names"]
new_attack_attrs["newly_modified_indices"] = set()
# Point to previously monitored text.
new_attack_attrs["previous_attacked_text"] = self
# Use `new_attack_attrs` to track indices with respect to the original
# text.
new_attack_attrs["modified_indices"] = self.attack_attrs["modified_indices"].copy()
new_attack_attrs["original_index_map"] = self.attack_attrs[
"original_index_map"
].copy()
new_i = 0
# Create the new attacked text by swapping out words from the original
# text with a sequence of 0+ words in the new text.
for i, (input_word, adv_word_seq) in enumerate(zip(self.words, new_words)):
word_start = original_text.index(input_word)
word_end = word_start + len(input_word)
perturbed_text += original_text[:word_start]
original_text = original_text[word_end:]
adv_num_words = len(words_from_text(adv_word_seq))
num_words_diff = adv_num_words - len(words_from_text(input_word))
# Track indices on insertions and deletions.
if num_words_diff != 0:
# Re-calculated modified indices. If words are inserted or deleted,
# they could change.
shifted_modified_indices = set()
for modified_idx in new_attack_attrs["modified_indices"]:
if modified_idx < i:
shifted_modified_indices.add(modified_idx)
elif modified_idx > i:
shifted_modified_indices.add(modified_idx + num_words_diff)
else:
pass
new_attack_attrs["modified_indices"] = shifted_modified_indices
# Track insertions and deletions wrt original text.
# original_modification_idx = i
new_idx_map = new_attack_attrs["original_index_map"].copy()
if num_words_diff == -1:
new_idx_map[new_idx_map == i] = -1
new_idx_map[new_idx_map > i] += num_words_diff
new_attack_attrs["original_index_map"] = new_idx_map
# Move pointer and save indices of new modified words.
for j in range(i, i + adv_num_words):
if input_word != adv_word_seq:
new_attack_attrs["modified_indices"].add(new_i)
new_attack_attrs["newly_modified_indices"].add(new_i)
new_i += 1
# Check spaces for deleted text.
if adv_num_words == 0:
# Remove extra space (or else there would be two spaces for each
# deleted word).
# @TODO What to do with punctuation in this case? This behavior is undefined.
if i == 0:
# If the first word was deleted, take a subsequent space.
if original_text[0] == " ":
original_text = original_text[1:]
else:
# If a word other than the first was deleted, take a preceding space.
if perturbed_text[-1] == " ":
perturbed_text = perturbed_text[:-1]
# Add substitute word(s) to new sentence.
perturbed_text += adv_word_seq
perturbed_text += original_text # Add all of the ending punctuation.
# Reform perturbed_text into an OrderedDict.
perturbed_input_texts = perturbed_text.split(AttackedText.SPLIT_TOKEN)
perturbed_input = OrderedDict(zip(self._text_input.keys(), perturbed_input_texts))
return AttackedText(perturbed_input, attack_attrs=new_attack_attrs)
|
https://github.com/QData/TextAttack/issues/238
|
Attack: 68%|██████████████████████████████████████████████████████████████████████████████████████████▌ | 676/1000 [04:33<02:11, 2.47it/s]
Traceback (most recent call last):
File "/u/jm8wx/.conda/envs/torch/bin/textattack", line 33, in <module>
sys.exit(load_entry_point('textattack', 'console_scripts', 'textattack')())
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/commands/textattack_cli.py", line 39, in main
args.func.run(args)
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/commands/train_model/train_model_command.py", line 29, in run
train_model(args)
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/commands/train_model/run_training.py", line 580, in train_model
adv_attack_results = _generate_adversarial_examples(
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/commands/train_model/run_training.py", line 270, in _generate_adversarial_examples
for adv_ex in tqdm.tqdm(
File "/u/jm8wx/.conda/envs/torch/lib/python3.8/site-packages/tqdm/std.py", line 1130, in __iter__
for obj in iterable:
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/shared/attack.py", line 270, in attack_dataset
result = self.attack_one(goal_function_result)
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/shared/attack.py", line 203, in attack_one
final_result = self.search_method(initial_result)
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/search_methods/search_method.py", line 29, in __call__
return self._perform_search(initial_result)
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/search_methods/greedy_word_swap_wir.py", line 92, in _perform_search
index_order, search_over = self._get_index_order(attacked_text)
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/search_methods/greedy_word_swap_wir.py", line 71, in _get_index_order
leave_one_texts = [
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/search_methods/greedy_word_swap_wir.py", line 72, in <listcomp>
initial_text.delete_word_at_index(i) for i in range(len_text)
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/shared/attacked_text.py", line 233, in delete_word_at_index
return self.replace_word_at_index(index, "")
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/shared/attacked_text.py", line 228, in replace_word_at_index
return self.replace_words_at_indices([index], [new_word])
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/shared/attacked_text.py", line 219, in replace_words_at_indices
return self.generate_new_attacked_text(words)
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/shared/attacked_text.py", line 330, in generate_new_attacked_text
if original_text[0] == " ":
IndexError: string index out of range
|
IndexError
|
def train_model(args):
logger.warn(
"WARNING: TextAttack's model training feature is in beta. Please report any issues on our Github page, https://github.com/QData/TextAttack/issues."
)
start_time = time.time()
make_directories(args.output_dir)
num_gpus = torch.cuda.device_count()
# Save logger writes to file
log_txt_path = os.path.join(args.output_dir, "log.txt")
fh = logging.FileHandler(log_txt_path)
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
logger.info(f"Writing logs to {log_txt_path}.")
# Use Weights & Biases, if enabled.
if args.enable_wandb:
global wandb
import wandb
wandb.init(sync_tensorboard=True)
# Get list of text and list of label (integers) from disk.
train_text, train_labels, eval_text, eval_labels = dataset_from_args(args)
# Filter labels
if args.allowed_labels:
logger.info(f"Filtering samples with labels outside of {args.allowed_labels}.")
final_train_text, final_train_labels = [], []
for text, label in zip(train_text, train_labels):
if label in args.allowed_labels:
final_train_text.append(text)
final_train_labels.append(label)
logger.info(
f"Filtered {len(train_text)} train samples to {len(final_train_text)} points."
)
train_text, train_labels = final_train_text, final_train_labels
final_eval_text, final_eval_labels = [], []
for text, label in zip(eval_text, eval_labels):
if label in args.allowed_labels:
final_eval_text.append(text)
final_eval_labels.append(label)
logger.info(
f"Filtered {len(eval_text)} dev samples to {len(final_eval_text)} points."
)
eval_text, eval_labels = final_eval_text, final_eval_labels
label_id_len = len(train_labels)
label_set = set(train_labels)
args.num_labels = len(label_set)
logger.info(
f"Loaded dataset. Found: {args.num_labels} labels: ({sorted(label_set)})"
)
if isinstance(train_labels[0], float):
# TODO come up with a more sophisticated scheme for when to do regression
logger.warn(f"Detected float labels. Doing regression.")
args.num_labels = 1
args.do_regression = True
else:
args.do_regression = False
train_examples_len = len(train_text)
if len(train_labels) != train_examples_len:
raise ValueError(
f"Number of train examples ({train_examples_len}) does not match number of labels ({len(train_labels)})"
)
if len(eval_labels) != len(eval_text):
raise ValueError(
f"Number of teste xamples ({len(eval_text)}) does not match number of labels ({len(eval_labels)})"
)
model = model_from_args(args, args.num_labels)
tokenizer = model.tokenizer
logger.info(f"Tokenizing training data. (len: {train_examples_len})")
train_text_ids = batch_encode(tokenizer, train_text)
logger.info(f"Tokenizing eval data (len: {len(eval_labels)})")
eval_text_ids = batch_encode(tokenizer, eval_text)
load_time = time.time()
logger.info(f"Loaded data and tokenized in {load_time - start_time}s")
# multi-gpu training
if num_gpus > 1:
model = torch.nn.DataParallel(model)
logger.info(f"Training model across {num_gpus} GPUs")
num_train_optimization_steps = (
int(train_examples_len / args.batch_size / args.grad_accum_steps)
* args.num_train_epochs
)
param_optimizer = list(model.named_parameters())
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p for n, p in param_optimizer if not any(nd in n for nd in no_decay)
],
"weight_decay": 0.01,
},
{
"params": [
p for n, p in param_optimizer if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = transformers.optimization.AdamW(
optimizer_grouped_parameters, lr=args.learning_rate
)
scheduler = transformers.optimization.get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_proportion,
num_training_steps=num_train_optimization_steps,
)
global_step = 0
# Start Tensorboard and log hyperparams.
from tensorboardX import SummaryWriter
tb_writer = SummaryWriter(args.output_dir)
def is_writable_type(obj):
for ok_type in [bool, int, str, float]:
if isinstance(obj, ok_type):
return True
return False
args_dict = {k: v for k, v in vars(args).items() if is_writable_type(v)}
tb_writer.add_hparams(args_dict, {})
# Start training
logger.info("***** Running training *****")
logger.info(f"\tNum examples = {train_examples_len}")
logger.info(f"\tBatch size = {args.batch_size}")
logger.info(f"\tMax sequence length = {args.max_length}")
logger.info(f"\tNum steps = {num_train_optimization_steps}")
logger.info(f"\tNum epochs = {args.num_train_epochs}")
logger.info(f"\tLearning rate = {args.learning_rate}")
train_input_ids = np.array(train_text_ids)
train_labels = np.array(train_labels)
train_data = list((ids, label) for ids, label in zip(train_input_ids, train_labels))
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(
train_data, sampler=train_sampler, batch_size=args.batch_size
)
eval_input_ids = np.array(eval_text_ids)
eval_labels = np.array(eval_labels)
eval_data = list((ids, label) for ids, label in zip(eval_input_ids, eval_labels))
eval_sampler = RandomSampler(eval_data)
eval_dataloader = DataLoader(
eval_data, sampler=eval_sampler, batch_size=args.batch_size
)
def get_eval_score():
model.eval()
correct = 0
total = 0
logits = []
labels = []
for input_ids, batch_labels in eval_dataloader:
if isinstance(input_ids, dict):
## HACK: dataloader collates dict backwards. This is a temporary
# workaround to get ids in the right shape
input_ids = {
k: torch.stack(v).T.to(device) for k, v in input_ids.items()
}
batch_labels = batch_labels.to(device)
with torch.no_grad():
batch_logits = textattack.shared.utils.model_predict(model, input_ids)
logits.extend(batch_logits.cpu().squeeze().tolist())
labels.extend(batch_labels)
model.train()
logits = torch.tensor(logits)
labels = torch.tensor(labels)
if args.do_regression:
pearson_correlation, pearson_p_value = scipy.stats.pearsonr(logits, labels)
return pearson_correlation
else:
preds = logits.argmax(dim=1)
correct = (preds == labels).sum()
return float(correct) / len(labels)
def save_model():
model_to_save = (
model.module if hasattr(model, "module") else model
) # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(args.output_dir, args.weights_name)
output_config_file = os.path.join(args.output_dir, args.config_name)
torch.save(model_to_save.state_dict(), output_model_file)
try:
model_to_save.config.to_json_file(output_config_file)
except AttributeError:
# no config
pass
global_step = 0
def save_model_checkpoint():
# Save model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, "module") else model
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info(f"Checkpoint saved to {output_dir}.")
model.train()
args.best_eval_score = 0
args.best_eval_score_epoch = 0
args.epochs_since_best_eval_score = 0
def loss_backward(loss):
if num_gpus > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.grad_accum_steps > 1:
loss = loss / args.grad_accum_steps
loss.backward()
return loss
for epoch in tqdm.trange(
int(args.num_train_epochs), desc="Epoch", position=0, leave=False
):
prog_bar = tqdm.tqdm(
train_dataloader, desc="Iteration", position=1, leave=False
)
for step, batch in enumerate(prog_bar):
input_ids, labels = batch
labels = labels.to(device)
if isinstance(input_ids, dict):
## HACK: dataloader collates dict backwards. This is a temporary
# workaround to get ids in the right shape
input_ids = {
k: torch.stack(v).T.to(device) for k, v in input_ids.items()
}
logits = textattack.shared.utils.model_predict(model, input_ids)
if args.do_regression:
# TODO integrate with textattack `metrics` package
loss_fct = torch.nn.MSELoss()
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss_fct = torch.nn.CrossEntropyLoss()
loss = loss_fct(logits, labels)
loss = loss_backward(loss)
if global_step % args.tb_writer_step == 0:
tb_writer.add_scalar("loss", loss.item(), global_step)
tb_writer.add_scalar("lr", scheduler.get_last_lr()[0], global_step)
prog_bar.set_description(f"Loss {loss.item()}")
if (step + 1) % args.grad_accum_steps == 0:
optimizer.step()
scheduler.step()
optimizer.zero_grad()
# Save model checkpoint to file.
if (
global_step > 0
and (args.checkpoint_steps > 0)
and (global_step % args.checkpoint_steps) == 0
):
save_model_checkpoint()
model.zero_grad()
# Inc step counter.
global_step += 1
# Check accuracy after each epoch.
eval_score = get_eval_score()
tb_writer.add_scalar("epoch_eval_score", eval_score, global_step)
if args.checkpoint_every_epoch:
save_model_checkpoint()
logger.info(
f"Eval {'pearson correlation' if args.do_regression else 'accuracy'}: {eval_score * 100}%"
)
if eval_score > args.best_eval_score:
args.best_eval_score = eval_score
args.best_eval_score_epoch = epoch
args.epochs_since_best_eval_score = 0
save_model()
logger.info(f"Best acc found. Saved model to {args.output_dir}.")
else:
args.epochs_since_best_eval_score += 1
if (args.early_stopping_epochs > 0) and (
args.epochs_since_best_eval_score > args.early_stopping_epochs
):
logger.info(
f"Stopping early since it's been {args.early_stopping_epochs} steps since validation acc increased"
)
break
# end of training, save tokenizer
try:
tokenizer.save_pretrained(args.output_dir)
logger.info(f"Saved tokenizer {tokenizer} to {args.output_dir}.")
except AttributeError:
logger.warn(
f"Error: could not save tokenizer {tokenizer} to {args.output_dir}."
)
# Save a little readme with model info
write_readme(args, args.best_eval_score, args.best_eval_score_epoch)
# Save args to file
args_save_path = os.path.join(args.output_dir, "train_args.json")
final_args_dict = {k: v for k, v in vars(args).items() if is_writable_type(v)}
with open(args_save_path, "w", encoding="utf-8") as f:
f.write(json.dumps(final_args_dict, indent=2) + "\n")
logger.info(f"Wrote training args to {args_save_path}.")
|
def train_model(args):
logger.warn(
"WARNING: TextAttack's model training feature is in beta. Please report any issues on our Github page, https://github.com/QData/TextAttack/issues."
)
start_time = time.time()
make_directories(args.output_dir)
num_gpus = torch.cuda.device_count()
# Save logger writes to file
log_txt_path = os.path.join(args.output_dir, "log.txt")
fh = logging.FileHandler(log_txt_path)
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
logger.info(f"Writing logs to {log_txt_path}.")
# Use Weights & Biases, if enabled.
if args.enable_wandb:
wandb.init(sync_tensorboard=True)
# Get list of text and list of label (integers) from disk.
train_text, train_labels, eval_text, eval_labels = dataset_from_args(args)
# Filter labels
if args.allowed_labels:
logger.info(f"Filtering samples with labels outside of {args.allowed_labels}.")
final_train_text, final_train_labels = [], []
for text, label in zip(train_text, train_labels):
if label in args.allowed_labels:
final_train_text.append(text)
final_train_labels.append(label)
logger.info(
f"Filtered {len(train_text)} train samples to {len(final_train_text)} points."
)
train_text, train_labels = final_train_text, final_train_labels
final_eval_text, final_eval_labels = [], []
for text, label in zip(eval_text, eval_labels):
if label in args.allowed_labels:
final_eval_text.append(text)
final_eval_labels.append(label)
logger.info(
f"Filtered {len(eval_text)} dev samples to {len(final_eval_text)} points."
)
eval_text, eval_labels = final_eval_text, final_eval_labels
label_id_len = len(train_labels)
label_set = set(train_labels)
args.num_labels = len(label_set)
logger.info(
f"Loaded dataset. Found: {args.num_labels} labels: ({sorted(label_set)})"
)
if isinstance(train_labels[0], float):
# TODO come up with a more sophisticated scheme for when to do regression
logger.warn(f"Detected float labels. Doing regression.")
args.num_labels = 1
args.do_regression = True
else:
args.do_regression = False
train_examples_len = len(train_text)
if len(train_labels) != train_examples_len:
raise ValueError(
f"Number of train examples ({train_examples_len}) does not match number of labels ({len(train_labels)})"
)
if len(eval_labels) != len(eval_text):
raise ValueError(
f"Number of teste xamples ({len(eval_text)}) does not match number of labels ({len(eval_labels)})"
)
model = model_from_args(args, args.num_labels)
tokenizer = model.tokenizer
logger.info(f"Tokenizing training data. (len: {train_examples_len})")
train_text_ids = batch_encode(tokenizer, train_text)
logger.info(f"Tokenizing eval data (len: {len(eval_labels)})")
eval_text_ids = batch_encode(tokenizer, eval_text)
load_time = time.time()
logger.info(f"Loaded data and tokenized in {load_time - start_time}s")
# multi-gpu training
if num_gpus > 1:
model = torch.nn.DataParallel(model)
logger.info(f"Training model across {num_gpus} GPUs")
num_train_optimization_steps = (
int(train_examples_len / args.batch_size / args.grad_accum_steps)
* args.num_train_epochs
)
param_optimizer = list(model.named_parameters())
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p for n, p in param_optimizer if not any(nd in n for nd in no_decay)
],
"weight_decay": 0.01,
},
{
"params": [
p for n, p in param_optimizer if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = transformers.optimization.AdamW(
optimizer_grouped_parameters, lr=args.learning_rate
)
scheduler = transformers.optimization.get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_proportion,
num_training_steps=num_train_optimization_steps,
)
global_step = 0
# Start Tensorboard and log hyperparams.
from tensorboardX import SummaryWriter
tb_writer = SummaryWriter(args.output_dir)
def is_writable_type(obj):
for ok_type in [bool, int, str, float]:
if isinstance(obj, ok_type):
return True
return False
args_dict = {k: v for k, v in vars(args).items() if is_writable_type(v)}
tb_writer.add_hparams(args_dict, {})
# Start training
logger.info("***** Running training *****")
logger.info(f"\tNum examples = {train_examples_len}")
logger.info(f"\tBatch size = {args.batch_size}")
logger.info(f"\tMax sequence length = {args.max_length}")
logger.info(f"\tNum steps = {num_train_optimization_steps}")
logger.info(f"\tNum epochs = {args.num_train_epochs}")
logger.info(f"\tLearning rate = {args.learning_rate}")
train_input_ids = np.array(train_text_ids)
train_labels = np.array(train_labels)
train_data = list((ids, label) for ids, label in zip(train_input_ids, train_labels))
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(
train_data, sampler=train_sampler, batch_size=args.batch_size
)
eval_input_ids = np.array(eval_text_ids)
eval_labels = np.array(eval_labels)
eval_data = list((ids, label) for ids, label in zip(eval_input_ids, eval_labels))
eval_sampler = RandomSampler(eval_data)
eval_dataloader = DataLoader(
eval_data, sampler=eval_sampler, batch_size=args.batch_size
)
def get_eval_score():
model.eval()
correct = 0
total = 0
logits = []
labels = []
for input_ids, batch_labels in eval_dataloader:
if isinstance(input_ids, dict):
## HACK: dataloader collates dict backwards. This is a temporary
# workaround to get ids in the right shape
input_ids = {
k: torch.stack(v).T.to(device) for k, v in input_ids.items()
}
batch_labels = batch_labels.to(device)
with torch.no_grad():
batch_logits = textattack.shared.utils.model_predict(model, input_ids)
logits.extend(batch_logits.cpu().squeeze().tolist())
labels.extend(batch_labels)
model.train()
logits = torch.tensor(logits)
labels = torch.tensor(labels)
if args.do_regression:
pearson_correlation, pearson_p_value = scipy.stats.pearsonr(logits, labels)
return pearson_correlation
else:
preds = logits.argmax(dim=1)
correct = (preds == labels).sum()
return float(correct) / len(labels)
def save_model():
model_to_save = (
model.module if hasattr(model, "module") else model
) # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(args.output_dir, args.weights_name)
output_config_file = os.path.join(args.output_dir, args.config_name)
torch.save(model_to_save.state_dict(), output_model_file)
try:
model_to_save.config.to_json_file(output_config_file)
except AttributeError:
# no config
pass
global_step = 0
def save_model_checkpoint():
# Save model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, "module") else model
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info(f"Checkpoint saved to {output_dir}.")
model.train()
args.best_eval_score = 0
args.best_eval_score_epoch = 0
args.epochs_since_best_eval_score = 0
def loss_backward(loss):
if num_gpus > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.grad_accum_steps > 1:
loss = loss / args.grad_accum_steps
loss.backward()
return loss
for epoch in tqdm.trange(
int(args.num_train_epochs), desc="Epoch", position=0, leave=False
):
prog_bar = tqdm.tqdm(
train_dataloader, desc="Iteration", position=1, leave=False
)
for step, batch in enumerate(prog_bar):
input_ids, labels = batch
labels = labels.to(device)
if isinstance(input_ids, dict):
## HACK: dataloader collates dict backwards. This is a temporary
# workaround to get ids in the right shape
input_ids = {
k: torch.stack(v).T.to(device) for k, v in input_ids.items()
}
logits = textattack.shared.utils.model_predict(model, input_ids)
if args.do_regression:
# TODO integrate with textattack `metrics` package
loss_fct = torch.nn.MSELoss()
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss_fct = torch.nn.CrossEntropyLoss()
loss = loss_fct(logits, labels)
loss = loss_backward(loss)
if global_step % args.tb_writer_step == 0:
tb_writer.add_scalar("loss", loss.item(), global_step)
tb_writer.add_scalar("lr", scheduler.get_last_lr()[0], global_step)
prog_bar.set_description(f"Loss {loss.item()}")
if (step + 1) % args.grad_accum_steps == 0:
optimizer.step()
scheduler.step()
optimizer.zero_grad()
# Save model checkpoint to file.
if (
global_step > 0
and (args.checkpoint_steps > 0)
and (global_step % args.checkpoint_steps) == 0
):
save_model_checkpoint()
model.zero_grad()
# Inc step counter.
global_step += 1
# Check accuracy after each epoch.
eval_score = get_eval_score()
tb_writer.add_scalar("epoch_eval_score", eval_score, global_step)
if args.checkpoint_every_epoch:
save_model_checkpoint()
logger.info(
f"Eval {'pearson correlation' if args.do_regression else 'accuracy'}: {eval_score * 100}%"
)
if eval_score > args.best_eval_score:
args.best_eval_score = eval_score
args.best_eval_score_epoch = epoch
args.epochs_since_best_eval_score = 0
save_model()
logger.info(f"Best acc found. Saved model to {args.output_dir}.")
else:
args.epochs_since_best_eval_score += 1
if (args.early_stopping_epochs > 0) and (
args.epochs_since_best_eval_score > args.early_stopping_epochs
):
logger.info(
f"Stopping early since it's been {args.early_stopping_epochs} steps since validation acc increased"
)
break
# end of training, save tokenizer
try:
tokenizer.save_pretrained(args.output_dir)
logger.info(f"Saved tokenizer {tokenizer} to {args.output_dir}.")
except AttributeError:
logger.warn(
f"Error: could not save tokenizer {tokenizer} to {args.output_dir}."
)
# Save a little readme with model info
write_readme(args, args.best_eval_score, args.best_eval_score_epoch)
# Save args to file
args_save_path = os.path.join(args.output_dir, "train_args.json")
final_args_dict = {k: v for k, v in vars(args).items() if is_writable_type(v)}
with open(args_save_path, "w", encoding="utf-8") as f:
f.write(json.dumps(final_args_dict, indent=2) + "\n")
logger.info(f"Wrote training args to {args_save_path}.")
|
https://github.com/QData/TextAttack/issues/165
|
$ textattack train --model distilbert-base-uncased --dataset glue:wnli --batch-size 128 --epochs 5 --max-length 256 --learning-rate 3e-05
...
Traceback (most recent call last):
File "/u/jm8wx/.conda/envs/torch/bin/textattack", line 33, in <module>
sys.exit(load_entry_point('textattack', 'console_scripts', 'textattack')())
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/commands/textattack_cli.py", line 41, in main
args.func.run(args)
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/commands/train_model/train_model_command.py", line 30, in run
train_model(args)
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/commands/train_model/run_training.py", line 108, in train_model
train_text_ids = batch_encode(tokenizer, train_text)
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/commands/train_model/run_training.py", line 28, in batch_encode
return tokenizer.batch_encode(text_list)
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/models/tokenizers/auto_tokenizer.py", line 57, in batch_encode
encodings = self.tokenizer.batch_encode_plus(
File "/u/jm8wx/.conda/envs/torch/lib/python3.8/site-packages/transformers/tokenization_utils.py", line 2451, in batch_encode_plus
raise ValueError(
ValueError: batch_text_or_text_pairs has to be a list (got <class 'tuple'>)
textattack train --model distilbert-base-uncased --dataset glue:wnli --batch-size 64 --epochs 5 --max-length 256 --learning-rate 5e-05
|
ValueError
|
def prepare_dataset_for_training(nlp_dataset):
"""Changes an `nlp` dataset into the proper format for tokenization."""
def prepare_example_dict(ex):
"""Returns the values in order corresponding to the data.
ex:
'Some text input'
or in the case of multi-sequence inputs:
('The premise', 'the hypothesis',)
etc.
"""
values = list(ex.values())
if len(values) == 1:
return values[0]
return tuple(values)
text, outputs = zip(*((prepare_example_dict(x[0]), x[1]) for x in nlp_dataset))
return list(text), list(outputs)
|
def prepare_dataset_for_training(nlp_dataset):
"""Changes an `nlp` dataset into the proper format for tokenization."""
def prepare_example_dict(ex):
"""Returns the values in order corresponding to the data.
ex:
'Some text input'
or in the case of multi-sequence inputs:
('The premise', 'the hypothesis',)
etc.
"""
values = list(ex.values())
if len(values) == 1:
return values[0]
return values
return zip(*((prepare_example_dict(x[0]), x[1]) for x in nlp_dataset))
|
https://github.com/QData/TextAttack/issues/165
|
$ textattack train --model distilbert-base-uncased --dataset glue:wnli --batch-size 128 --epochs 5 --max-length 256 --learning-rate 3e-05
...
Traceback (most recent call last):
File "/u/jm8wx/.conda/envs/torch/bin/textattack", line 33, in <module>
sys.exit(load_entry_point('textattack', 'console_scripts', 'textattack')())
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/commands/textattack_cli.py", line 41, in main
args.func.run(args)
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/commands/train_model/train_model_command.py", line 30, in run
train_model(args)
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/commands/train_model/run_training.py", line 108, in train_model
train_text_ids = batch_encode(tokenizer, train_text)
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/commands/train_model/run_training.py", line 28, in batch_encode
return tokenizer.batch_encode(text_list)
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/models/tokenizers/auto_tokenizer.py", line 57, in batch_encode
encodings = self.tokenizer.batch_encode_plus(
File "/u/jm8wx/.conda/envs/torch/lib/python3.8/site-packages/transformers/tokenization_utils.py", line 2451, in batch_encode_plus
raise ValueError(
ValueError: batch_text_or_text_pairs has to be a list (got <class 'tuple'>)
textattack train --model distilbert-base-uncased --dataset glue:wnli --batch-size 64 --epochs 5 --max-length 256 --learning-rate 5e-05
|
ValueError
|
def prepare_example_dict(ex):
"""Returns the values in order corresponding to the data.
ex:
'Some text input'
or in the case of multi-sequence inputs:
('The premise', 'the hypothesis',)
etc.
"""
values = list(ex.values())
if len(values) == 1:
return values[0]
return tuple(values)
|
def prepare_example_dict(ex):
"""Returns the values in order corresponding to the data.
ex:
'Some text input'
or in the case of multi-sequence inputs:
('The premise', 'the hypothesis',)
etc.
"""
values = list(ex.values())
if len(values) == 1:
return values[0]
return values
|
https://github.com/QData/TextAttack/issues/165
|
$ textattack train --model distilbert-base-uncased --dataset glue:wnli --batch-size 128 --epochs 5 --max-length 256 --learning-rate 3e-05
...
Traceback (most recent call last):
File "/u/jm8wx/.conda/envs/torch/bin/textattack", line 33, in <module>
sys.exit(load_entry_point('textattack', 'console_scripts', 'textattack')())
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/commands/textattack_cli.py", line 41, in main
args.func.run(args)
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/commands/train_model/train_model_command.py", line 30, in run
train_model(args)
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/commands/train_model/run_training.py", line 108, in train_model
train_text_ids = batch_encode(tokenizer, train_text)
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/commands/train_model/run_training.py", line 28, in batch_encode
return tokenizer.batch_encode(text_list)
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/models/tokenizers/auto_tokenizer.py", line 57, in batch_encode
encodings = self.tokenizer.batch_encode_plus(
File "/u/jm8wx/.conda/envs/torch/lib/python3.8/site-packages/transformers/tokenization_utils.py", line 2451, in batch_encode_plus
raise ValueError(
ValueError: batch_text_or_text_pairs has to be a list (got <class 'tuple'>)
textattack train --model distilbert-base-uncased --dataset glue:wnli --batch-size 64 --epochs 5 --max-length 256 --learning-rate 5e-05
|
ValueError
|
def __init__(
self, word_id_map={}, pad_token_id=None, unk_token_id=None, max_length=256
):
super().__init__(
word_id_map=word_id_map,
unk_token_id=unk_token_id,
pad_token_id=pad_token_id,
lowercase=True,
)
self.pad_id = pad_token_id
self.oov_id = unk_token_id
self.convert_id_to_word = self.id_to_token
# Set defaults.
self.enable_padding(length=max_length, pad_id=pad_token_id)
self.enable_truncation(max_length=max_length)
|
def __init__(
self, word_id_map={}, pad_token_id=None, unk_token_id=None, max_length=256
):
super().__init__(
word_id_map=word_id_map,
unk_token_id=unk_token_id,
pad_token_id=pad_token_id,
lowercase=True,
)
self.pad_id = pad_token_id
self.oov_id = unk_token_id
self.convert_id_to_word = self.id_to_token
# Set defaults.
self.enable_padding(max_length=max_length, pad_id=pad_token_id)
self.enable_truncation(max_length=max_length)
|
https://github.com/QData/TextAttack/issues/165
|
$ textattack train --model distilbert-base-uncased --dataset glue:wnli --batch-size 128 --epochs 5 --max-length 256 --learning-rate 3e-05
...
Traceback (most recent call last):
File "/u/jm8wx/.conda/envs/torch/bin/textattack", line 33, in <module>
sys.exit(load_entry_point('textattack', 'console_scripts', 'textattack')())
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/commands/textattack_cli.py", line 41, in main
args.func.run(args)
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/commands/train_model/train_model_command.py", line 30, in run
train_model(args)
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/commands/train_model/run_training.py", line 108, in train_model
train_text_ids = batch_encode(tokenizer, train_text)
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/commands/train_model/run_training.py", line 28, in batch_encode
return tokenizer.batch_encode(text_list)
File "/p/qdata/jm8wx/research/text_attacks/textattack/textattack/models/tokenizers/auto_tokenizer.py", line 57, in batch_encode
encodings = self.tokenizer.batch_encode_plus(
File "/u/jm8wx/.conda/envs/torch/lib/python3.8/site-packages/transformers/tokenization_utils.py", line 2451, in batch_encode_plus
raise ValueError(
ValueError: batch_text_or_text_pairs has to be a list (got <class 'tuple'>)
textattack train --model distilbert-base-uncased --dataset glue:wnli --batch-size 64 --epochs 5 --max-length 256 --learning-rate 5e-05
|
ValueError
|
def _validate_natural(value):
if value <= 0:
raise exceptions.ApiError("Must be greater than zero", status_code=422)
|
def _validate_natural(value):
if value < 0:
raise exceptions.ApiError("Must be a natural number", status_code=422)
|
https://github.com/fecgov/openFEC/issues/4486
|
2020-07-20T10:41:24.80-0400 [APP/PROC/WEB/4] ERR [2020-07-20 14:41:24 +0000] [301] [ERROR] Error handling request /v1/candidates/?sort_nulls_last=false&sort_hide_null=false&office=P&per_page=100&sort_null_only=false&sort=name&page=0&election_year=2024
2020-07-20T10:41:24.80-0400 [APP/PROC/WEB/4] ERR Traceback (most recent call last):
2020-07-20T10:41:24.80-0400 [APP/PROC/WEB/4] ERR File "/home/vcap/deps/0/python/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1244, in _execute_context
2020-07-20T10:41:24.80-0400 [APP/PROC/WEB/4] ERR cursor, statement, parameters, context
2020-07-20T10:41:24.80-0400 [APP/PROC/WEB/4] ERR File "/home/vcap/deps/0/python/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 552, in do_execute
2020-07-20T10:41:24.80-0400 [APP/PROC/WEB/4] ERR cursor.execute(statement, parameters)
2020-07-20T10:41:24.80-0400 [APP/PROC/WEB/4] ERR psycopg2.errors.InvalidRowCountInResultOffsetClause: OFFSET must not be negative
|
psycopg2.error
|
def sort_args(self):
return args.make_sort_args(
validator=args.IndexValidator(
self.model,
extra=["candidate_name", "committee_name", "candidate_id", "committee_id"],
),
)
|
def sort_args(self):
return args.make_sort_args(
validator=args.IndexValidator(
self.model,
extra=["candidate", "committee"],
),
)
|
https://github.com/fecgov/openFEC/issues/4360
|
2020-05-18T14:16:00.43-0400 [RTR/1] OUT fec-prod-api.app.cloud.gov - [2020-05-18T18:16:00.388886389Z] "GET /v1/schedules/schedule_e/by_candidate/?sort_hide_null=false&sort_nulls_last=true&sort=candidate&per_page=10&page=1&cycle=2020&election_full=true&duration=6&office=senate&state=CO&stateFull=Colorado HTTP/1.1" 500 0 141 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36" "127.0.0.1:20354" "10.10.2.12:61082" x_forwarded_for:"73.212.220.45, 73.212.220.45, 54.208.160.112, 127.0.0.1" x_forwarded_proto:"https" vcap_request_id:"910be5ac-6c31-4e2a-6b66-9cd78e18ce4f" response_time:0.043971 gorouter_time:0.000124 app_id:"3ecc8d29-da07-45fa-bbfc-c0aed0322413" app_index:"8" x_b3_traceid:"c7698e3b4e20aac6" x_b3_spanid:"c7698e3b4e20aac6" x_b3_parentspanid:"-" b3:"c7698e3b4e20aac6-c7698e3b4e20aac6"
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR sqlalchemy.exc.CompileError: Can't resolve label reference for ORDER BY / GROUP BY. Textual SQL expression 'candidate' should be explicitly declared as text('candidate')
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR During handling of the above exception, another exception occurred:
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR Traceback (most recent call last):
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR File "/home/vcap/deps/0/python/lib/python3.7/site-packages/gunicorn/workers/base_async.py", line 56, in handle
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR self.handle_request(listener_name, req, client, addr)
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR File "/home/vcap/deps/0/python/lib/python3.7/site-packages/gunicorn/workers/ggevent.py", line 160, in handle_request
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR addr)
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR File "/home/vcap/deps/0/python/lib/python3.7/site-packages/gunicorn/workers/base_async.py", line 107, in handle_request
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR respiter = self.wsgi(environ, resp.start_response)
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR File "/home/vcap/deps/0/python/lib/python3.7/site-packages/flask/app.py", line 2463, in __call__
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR return self.wsgi_app(environ, start_response)
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR File "/home/vcap/deps/0/python/lib/python3.7/site-packages/werkzeug/middleware/proxy_fix.py", line 232, in __call__
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR return self.app(environ, start_response)
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR File "/home/vcap/deps/0/python/lib/python3.7/site-packages/flask/app.py", line 2449, in wsgi_app
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR response = self.handle_exception(e)
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR File "/home/vcap/deps/0/python/lib/python3.7/site-packages/flask_restful/__init__.py", line 269, in error_router
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR return original_handler(e)
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR File "/home/vcap/deps/0/python/lib/python3.7/site-packages/flask_cors/extension.py", line 161, in wrapped_function
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR return cors_after_request(app.make_response(f(*args, **kwargs)))
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR File "/home/vcap/deps/0/python/lib/python3.7/site-packages/flask/app.py", line 1866, in handle_exception
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR reraise(exc_type, exc_value, tb)
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR File "/home/vcap/deps/0/python/lib/python3.7/site-packages/flask/_compat.py", line 38, in reraise
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR raise value.with_traceback(tb)
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR File "/home/vcap/deps/0/python/lib/python3.7/site-packages/flask/app.py", line 2446, in wsgi_app
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR response = self.full_dispatch_request()
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR File "/home/vcap/deps/0/python/lib/python3.7/site-packages/flask/app.py", line 1951, in full_dispatch_request
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR rv = self.handle_user_exception(e)
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR File "/home/vcap/deps/0/python/lib/python3.7/site-packages/flask_restful/__init__.py", line 269, in error_router
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR return original_handler(e)
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR File "/home/vcap/deps/0/python/lib/python3.7/site-packages/flask_cors/extension.py", line 161, in wrapped_function
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR return cors_after_request(app.make_response(f(*args, **kwargs)))
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR File "/home/vcap/deps/0/python/lib/python3.7/site-packages/flask/app.py", line 1821, in handle_user_exception
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR return handler(e)
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR File "/home/vcap/app/webservices/rest.py", line 271, in handle_exception
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR status_code=http.client.NOT_FOUND)
2020-05-18T14:16:00.43-0400 [APP/PROC/WEB/8] ERR webservices.exceptions.ApiError
|
sqlalchemy.exc.CompileError
|
def update_view_configuration():
reboot_required = False
to_save = request.form.to_dict()
_config_string = lambda x: config.set_from_dictionary(
to_save, x, lambda y: y.strip() if y else y
)
_config_int = lambda x: config.set_from_dictionary(to_save, x, int)
_config_string("config_calibre_web_title")
_config_string("config_columns_to_ignore")
# _config_string("config_mature_content_tags")
reboot_required |= _config_string("config_title_regex")
_config_int("config_read_column")
_config_int("config_theme")
_config_int("config_random_books")
_config_int("config_books_per_page")
_config_int("config_authors_max")
_config_int("config_restricted_column")
config.config_default_role = constants.selected_roles(to_save)
config.config_default_role &= ~constants.ROLE_ANONYMOUS
config.config_default_show = sum(
int(k[5:]) for k in to_save if k.startswith("show_")
)
if "Show_detail_random" in to_save:
config.config_default_show |= constants.DETAIL_RANDOM
config.save()
flash(_("Calibre-Web configuration updated"), category="success")
before_request()
if reboot_required:
db.dispose()
ub.dispose()
web_server.stop(True)
return view_configuration()
|
def update_view_configuration():
reboot_required = False
to_save = request.form.to_dict()
_config_string = lambda x: config.set_from_dictionary(
to_save, x, lambda y: y.strip() if y else y
)
_config_int = lambda x: config.set_from_dictionary(to_save, x, int)
_config_string("config_calibre_web_title")
_config_string("config_columns_to_ignore")
# _config_string("config_mature_content_tags")
reboot_required |= _config_string("config_title_regex")
_config_int("config_read_column")
_config_int("config_theme")
_config_int("config_random_books")
_config_int("config_books_per_page")
_config_int("config_authors_max")
_config_int("config_restricted_column")
if config.config_google_drive_watch_changes_response:
config.config_google_drive_watch_changes_response = json.dumps(
config.config_google_drive_watch_changes_response
)
config.config_default_role = constants.selected_roles(to_save)
config.config_default_role &= ~constants.ROLE_ANONYMOUS
config.config_default_show = sum(
int(k[5:]) for k in to_save if k.startswith("show_")
)
if "Show_detail_random" in to_save:
config.config_default_show |= constants.DETAIL_RANDOM
config.save()
flash(_("Calibre-Web configuration updated"), category="success")
before_request()
if reboot_required:
db.dispose()
ub.dispose()
web_server.stop(True)
return view_configuration()
|
https://github.com/janeczku/calibre-web/issues/1387
|
[services.d] starting services
[services.d] done.
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py", line 1248, in _execute_context
cursor, statement, parameters, context
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/default.py", line 590, in do_execute
cursor.execute(statement, parameters)
sqlite3.InterfaceError: Error binding parameter 1 - probably unsupported type.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.6/dist-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 272, in decorated_view
return func(*args, **kwargs)
File "/app/calibre-web/cps/web.py", line 213, in inner
return f(*args, **kwargs)
File "/app/calibre-web/cps/admin.py", line 142, in configuration
return _configuration_update_helper()
File "/app/calibre-web/cps/admin.py", line 673, in _configuration_update_helper
config.save()
File "/app/calibre-web/cps/config_sql.py", line 289, in save
self._session.merge(s)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 2117, in merge
self._autoflush()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1602, in _autoflush
util.raise_(e, with_traceback=sys.exc_info()[2])
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/compat.py", line 178, in raise_
raise exception
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1591, in _autoflush
self.flush()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 2496, in flush
self._flush(objects)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 2637, in _flush
transaction.rollback(_capture_exception=True)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/langhelpers.py", line 69, in __exit__
exc_value, with_traceback=exc_tb,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/compat.py", line 178, in raise_
raise exception
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 2597, in _flush
flush_context.execute()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/unitofwork.py", line 422, in execute
rec.execute(self)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/unitofwork.py", line 589, in execute
uow,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/persistence.py", line 236, in save_obj
update,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/persistence.py", line 995, in _emit_update_statements
statement, multiparams
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py", line 984, in execute
return meth(self, multiparams, params)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/elements.py", line 293, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py", line 1103, in _execute_clauseelement
distilled_params,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py", line 1288, in _execute_context
e, statement, parameters, cursor, context
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py", line 1482, in _handle_dbapi_exception
sqlalchemy_exception, with_traceback=exc_info[2], from_=e
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/compat.py", line 178, in raise_
raise exception
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py", line 1248, in _execute_context
cursor, statement, parameters, context
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/default.py", line 590, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.InterfaceError: (raised as a result of Query-invoked autoflush; consider using a session.no_autoflush block if this flush is occurring prematurely)
(sqlite3.InterfaceError) Error binding parameter 1 - probably unsupported type.
[SQL: UPDATE settings SET config_google_drive_folder=?, config_google_drive_watch_changes_response=? WHERE settings.id = ?]
[parameters: ('Google Drive Folder Name I Am Not Using', {'kind': 'api#channel', 'id': 'idnumberhere', 'resourceId': 'resourceidhere', 'resourceUri': 'https://www.googlea ... (162 characters truncated) ... se&maxResults=100&supportsAllDrives=false&supportsTeamDrives=false&alt=json', 'token': 'target=calibreweb-watch_files', 'expiration': '1589870595000'}, 1)]
(Background on this error at: http://sqlalche.me/e/rvf5)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/gevent/pywsgi.py", line 968, in handle_one_response
self.run_application()
File "/usr/local/lib/python3.6/dist-packages/gevent/pywsgi.py", line 915, in run_application
self.result = self.application(self.environ, self.start_response)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2464, in __call__
return self.wsgi_app(environ, start_response)
File "/app/calibre-web/cps/reverseproxy.py", line 80, in __call__
return self.app(environ, start_response)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2450, in wsgi_app
response = self.handle_exception(e)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1879, in handle_exception
server_error = handler(server_error)
File "/app/calibre-web/cps/web.py", line 107, in internal_error
instance=config.config_calibre_web_title
File "/usr/local/lib/python3.6/dist-packages/flask/templating.py", line 140, in render_template
ctx.app,
File "/usr/local/lib/python3.6/dist-packages/flask/templating.py", line 120, in _render
rv = template.render(context)
File "/usr/local/lib/python3.6/dist-packages/jinja2/environment.py", line 1090, in render
self.environment.handle_exception()
File "/usr/local/lib/python3.6/dist-packages/jinja2/environment.py", line 832, in handle_exception
reraise(*rewrite_traceback_stack(source=source))
File "/usr/local/lib/python3.6/dist-packages/jinja2/_compat.py", line 28, in reraise
raise value.with_traceback(tb)
File "/app/calibre-web/cps/templates/http_error.html", line 2, in top-level template code
<html class="http-error" lang="{{ g.user.locale }}">
File "/usr/local/lib/python3.6/dist-packages/jinja2/environment.py", line 471, in getattr
return getattr(obj, attribute)
File "/usr/local/lib/python3.6/dist-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/attributes.py", line 286, in __get__
return self.impl.get(instance_state(instance), dict_)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/attributes.py", line 717, in get
value = state._load_expired(state, passive)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/state.py", line 652, in _load_expired
self.manager.deferred_scalar_loader(self, toload)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/loading.py", line 1012, in load_scalar_attributes
only_load_props=attribute_names,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/loading.py", line 207, in load_on_ident
identity_token=identity_token,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/loading.py", line 287, in load_on_pk_identity
return q.one()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3360, in one
ret = self.one_or_none()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3329, in one_or_none
ret = list(self)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3405, in __iter__
return self._execute_and_instances(context)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3427, in _execute_and_instances
querycontext, self._connection_from_session, close_with_result=True
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3442, in _get_bind_args
mapper=self._bind_mapper(), clause=querycontext.statement, **kw
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3420, in _connection_from_session
conn = self.session.connection(**kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1133, in connection
execution_options=execution_options,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1139, in _connection_for_bind
engine, execution_options
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 408, in _connection_for_bind
self._assert_active()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 295, in _assert_active
code="7s2a",
sqlalchemy.exc.InvalidRequestError: This Session's transaction has been rolled back due to a previous exception during flush. To begin a new transaction with this Session, first issue Session.rollback(). Original exception was: (raised as a result of Query-invoked autoflush; consider using a session.no_autoflush block if this flush is occurring prematurely)
(sqlite3.InterfaceError) Error binding parameter 1 - probably unsupported type.
[SQL: UPDATE settings SET config_google_drive_folder=?, config_google_drive_watch_changes_response=? WHERE settings.id = ?]
[parameters: ('Google Drive Folder Name I Am Not Using', {'kind': 'api#channel', 'id': 'idnumberhere', 'resourceId': 'resourceidhere', 'resourceUri': 'https://www.googlea ... (162 characters truncated) ... se&maxResults=100&supportsAllDrives=false&supportsTeamDrives=false&alt=json', 'token': 'target=calibreweb-watch_files', 'expiration': '1589870595000'}, 1)]
(Background on this error at: http://sqlalche.me/e/rvf5) (Background on this error at: http://sqlalche.me/e/7s2a)
2020-05-12T06:43:20Z {'REMOTE_ADDR': '::ffff:192.168.2.7', 'REMOTE_PORT': '51876', 'HTTP_HOST': 'calibre.net-slum.org', (hidden keys: 38)} failed with InvalidRequestError
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.6/dist-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1948, in full_dispatch_request
rv = self.preprocess_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2242, in preprocess_request
rv = func()
File "/app/calibre-web/cps/web.py", line 293, in before_request
or_(ub.Shelf.is_public == 1, ub.Shelf.user_id == current_user.id)).order_by(ub.Shelf.name).all()
File "/usr/local/lib/python3.6/dist-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
File "/usr/local/lib/python3.6/dist-packages/werkzeug/local.py", line 306, in _get_current_object
return self.__local()
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 26, in <lambda>
current_user = LocalProxy(lambda: _get_user())
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 346, in _get_user
current_app.login_manager._load_user()
File "/usr/local/lib/python3.6/dist-packages/flask_login/login_manager.py", line 318, in _load_user
user = self._user_callback(user_id)
File "/app/calibre-web/cps/web.py", line 138, in load_user
return ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3300, in first
ret = list(self[0:1])
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3078, in __getitem__
return list(res)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3405, in __iter__
return self._execute_and_instances(context)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3427, in _execute_and_instances
querycontext, self._connection_from_session, close_with_result=True
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3442, in _get_bind_args
mapper=self._bind_mapper(), clause=querycontext.statement, **kw
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3420, in _connection_from_session
conn = self.session.connection(**kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1133, in connection
execution_options=execution_options,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1139, in _connection_for_bind
engine, execution_options
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 408, in _connection_for_bind
self._assert_active()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 295, in _assert_active
code="7s2a",
sqlalchemy.exc.InvalidRequestError: This Session's transaction has been rolled back due to a previous exception during flush. To begin a new transaction with this Session, first issue Session.rollback(). Original exception was: (raised as a result of Query-invoked autoflush; consider using a session.no_autoflush block if this flush is occurring prematurely)
(sqlite3.InterfaceError) Error binding parameter 1 - probably unsupported type.
[SQL: UPDATE settings SET config_google_drive_folder=?, config_google_drive_watch_changes_response=? WHERE settings.id = ?]
[parameters: ('Google Drive Folder Name I Am Not Using', {'kind': 'api#channel', 'id': 'idnumberhere', 'resourceId': 'resourceidhere', 'resourceUri': 'https://www.googlea ... (162 characters truncated) ... se&maxResults=100&supportsAllDrives=false&supportsTeamDrives=false&alt=json', 'token': 'target=calibreweb-watch_files', 'expiration': '1589870595000'}, 1)]
(Background on this error at: http://sqlalche.me/e/rvf5) (Background on this error at: http://sqlalche.me/e/7s2a)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/gevent/pywsgi.py", line 968, in handle_one_response
self.run_application()
File "/usr/local/lib/python3.6/dist-packages/gevent/pywsgi.py", line 915, in run_application
self.result = self.application(self.environ, self.start_response)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2464, in __call__
return self.wsgi_app(environ, start_response)
File "/app/calibre-web/cps/reverseproxy.py", line 80, in __call__
return self.app(environ, start_response)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2450, in wsgi_app
response = self.handle_exception(e)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1879, in handle_exception
server_error = handler(server_error)
File "/app/calibre-web/cps/web.py", line 107, in internal_error
instance=config.config_calibre_web_title
File "/usr/local/lib/python3.6/dist-packages/flask/templating.py", line 136, in render_template
ctx.app.update_template_context(context)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 838, in update_template_context
context.update(func())
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 379, in _user_context_processor
return dict(current_user=_get_user())
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 346, in _get_user
current_app.login_manager._load_user()
File "/usr/local/lib/python3.6/dist-packages/flask_login/login_manager.py", line 318, in _load_user
user = self._user_callback(user_id)
File "/app/calibre-web/cps/web.py", line 138, in load_user
return ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3300, in first
ret = list(self[0:1])
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3078, in __getitem__
return list(res)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3405, in __iter__
return self._execute_and_instances(context)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3427, in _execute_and_instances
querycontext, self._connection_from_session, close_with_result=True
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3442, in _get_bind_args
mapper=self._bind_mapper(), clause=querycontext.statement, **kw
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3420, in _connection_from_session
conn = self.session.connection(**kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1133, in connection
execution_options=execution_options,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1139, in _connection_for_bind
engine, execution_options
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 408, in _connection_for_bind
self._assert_active()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 295, in _assert_active
code="7s2a",
sqlalchemy.exc.InvalidRequestError: This Session's transaction has been rolled back due to a previous exception during flush. To begin a new transaction with this Session, first issue Session.rollback(). Original exception was: (raised as a result of Query-invoked autoflush; consider using a session.no_autoflush block if this flush is occurring prematurely)
(sqlite3.InterfaceError) Error binding parameter 1 - probably unsupported type.
[SQL: UPDATE settings SET config_google_drive_folder=?, config_google_drive_watch_changes_response=? WHERE settings.id = ?]
[parameters: ('Google Drive Folder Name I Am Not Using', {'kind': 'api#channel', 'id': 'idnumberhere', 'resourceId': 'resourceidhere', 'resourceUri': 'https://www.googlea ... (162 characters truncated) ... se&maxResults=100&supportsAllDrives=false&supportsTeamDrives=false&alt=json', 'token': 'target=calibreweb-watch_files', 'expiration': '1589870595000'}, 1)]
(Background on this error at: http://sqlalche.me/e/rvf5) (Background on this error at: http://sqlalche.me/e/7s2a)
2020-05-12T06:43:20Z {'REMOTE_ADDR': '::ffff:192.168.2.7', 'REMOTE_PORT': '51860', 'HTTP_HOST': 'calibre.net-slum.org', (hidden keys: 32)} failed with InvalidRequestError
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.6/dist-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1948, in full_dispatch_request
rv = self.preprocess_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2242, in preprocess_request
rv = func()
File "/app/calibre-web/cps/web.py", line 293, in before_request
or_(ub.Shelf.is_public == 1, ub.Shelf.user_id == current_user.id)).order_by(ub.Shelf.name).all()
File "/usr/local/lib/python3.6/dist-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
File "/usr/local/lib/python3.6/dist-packages/werkzeug/local.py", line 306, in _get_current_object
return self.__local()
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 26, in <lambda>
current_user = LocalProxy(lambda: _get_user())
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 346, in _get_user
current_app.login_manager._load_user()
File "/usr/local/lib/python3.6/dist-packages/flask_login/login_manager.py", line 318, in _load_user
user = self._user_callback(user_id)
File "/app/calibre-web/cps/web.py", line 138, in load_user
return ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3300, in first
ret = list(self[0:1])
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3078, in __getitem__
return list(res)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3405, in __iter__
return self._execute_and_instances(context)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3427, in _execute_and_instances
querycontext, self._connection_from_session, close_with_result=True
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3442, in _get_bind_args
mapper=self._bind_mapper(), clause=querycontext.statement, **kw
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3420, in _connection_from_session
conn = self.session.connection(**kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1133, in connection
execution_options=execution_options,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1139, in _connection_for_bind
engine, execution_options
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 408, in _connection_for_bind
self._assert_active()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 295, in _assert_active
code="7s2a",
sqlalchemy.exc.InvalidRequestError: This Session's transaction has been rolled back due to a previous exception during flush. To begin a new transaction with this Session, first issue Session.rollback(). Original exception was: (raised as a result of Query-invoked autoflush; consider using a session.no_autoflush block if this flush is occurring prematurely)
(sqlite3.InterfaceError) Error binding parameter 1 - probably unsupported type.
[SQL: UPDATE settings SET config_google_drive_folder=?, config_google_drive_watch_changes_response=? WHERE settings.id = ?]
[parameters: ('Google Drive Folder Name I Am Not Using', {'kind': 'api#channel', 'id': 'idnumberhere', 'resourceId': 'resourceidhere', 'resourceUri': 'https://www.googlea ... (162 characters truncated) ... se&maxResults=100&supportsAllDrives=false&supportsTeamDrives=false&alt=json', 'token': 'target=calibreweb-watch_files', 'expiration': '1589870595000'}, 1)]
(Background on this error at: http://sqlalche.me/e/rvf5) (Background on this error at: http://sqlalche.me/e/7s2a)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/gevent/pywsgi.py", line 968, in handle_one_response
self.run_application()
File "/usr/local/lib/python3.6/dist-packages/gevent/pywsgi.py", line 915, in run_application
self.result = self.application(self.environ, self.start_response)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2464, in __call__
return self.wsgi_app(environ, start_response)
File "/app/calibre-web/cps/reverseproxy.py", line 80, in __call__
return self.app(environ, start_response)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2450, in wsgi_app
response = self.handle_exception(e)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1879, in handle_exception
server_error = handler(server_error)
File "/app/calibre-web/cps/web.py", line 107, in internal_error
instance=config.config_calibre_web_title
File "/usr/local/lib/python3.6/dist-packages/flask/templating.py", line 136, in render_template
ctx.app.update_template_context(context)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 838, in update_template_context
context.update(func())
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 379, in _user_context_processor
return dict(current_user=_get_user())
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 346, in _get_user
current_app.login_manager._load_user()
File "/usr/local/lib/python3.6/dist-packages/flask_login/login_manager.py", line 318, in _load_user
user = self._user_callback(user_id)
File "/app/calibre-web/cps/web.py", line 138, in load_user
return ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3300, in first
ret = list(self[0:1])
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3078, in __getitem__
return list(res)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3405, in __iter__
return self._execute_and_instances(context)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3427, in _execute_and_instances
querycontext, self._connection_from_session, close_with_result=True
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3442, in _get_bind_args
mapper=self._bind_mapper(), clause=querycontext.statement, **kw
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3420, in _connection_from_session
conn = self.session.connection(**kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1133, in connection
execution_options=execution_options,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1139, in _connection_for_bind
engine, execution_options
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 408, in _connection_for_bind
self._assert_active()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 295, in _assert_active
code="7s2a",
sqlalchemy.exc.InvalidRequestError: This Session's transaction has been rolled back due to a previous exception during flush. To begin a new transaction with this Session, first issue Session.rollback(). Original exception was: (raised as a result of Query-invoked autoflush; consider using a session.no_autoflush block if this flush is occurring prematurely)
(sqlite3.InterfaceError) Error binding parameter 1 - probably unsupported type.
[SQL: UPDATE settings SET config_google_drive_folder=?, config_google_drive_watch_changes_response=? WHERE settings.id = ?]
[parameters: ('Google Drive Folder Name I Am Not Using', {'kind': 'api#channel', 'id': 'idnumberhere', 'resourceId': 'resourceidhere', 'resourceUri': 'https://www.googlea ... (162 characters truncated) ... se&maxResults=100&supportsAllDrives=false&supportsTeamDrives=false&alt=json', 'token': 'target=calibreweb-watch_files', 'expiration': '1589870595000'}, 1)]
(Background on this error at: http://sqlalche.me/e/rvf5) (Background on this error at: http://sqlalche.me/e/7s2a)
2020-05-12T06:43:24Z {'REMOTE_ADDR': '::ffff:192.168.2.7', 'REMOTE_PORT': '51880', 'HTTP_HOST': 'calibre.net-slum.org', (hidden keys: 33)} failed with InvalidRequestError
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.6/dist-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1948, in full_dispatch_request
rv = self.preprocess_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2242, in preprocess_request
rv = func()
File "/app/calibre-web/cps/web.py", line 293, in before_request
or_(ub.Shelf.is_public == 1, ub.Shelf.user_id == current_user.id)).order_by(ub.Shelf.name).all()
File "/usr/local/lib/python3.6/dist-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
File "/usr/local/lib/python3.6/dist-packages/werkzeug/local.py", line 306, in _get_current_object
return self.__local()
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 26, in <lambda>
current_user = LocalProxy(lambda: _get_user())
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 346, in _get_user
current_app.login_manager._load_user()
File "/usr/local/lib/python3.6/dist-packages/flask_login/login_manager.py", line 318, in _load_user
user = self._user_callback(user_id)
File "/app/calibre-web/cps/web.py", line 138, in load_user
return ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3300, in first
ret = list(self[0:1])
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3078, in __getitem__
return list(res)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3405, in __iter__
return self._execute_and_instances(context)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3427, in _execute_and_instances
querycontext, self._connection_from_session, close_with_result=True
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3442, in _get_bind_args
mapper=self._bind_mapper(), clause=querycontext.statement, **kw
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3420, in _connection_from_session
conn = self.session.connection(**kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1133, in connection
execution_options=execution_options,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1139, in _connection_for_bind
engine, execution_options
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 408, in _connection_for_bind
self._assert_active()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 295, in _assert_active
code="7s2a",
sqlalchemy.exc.InvalidRequestError: This Session's transaction has been rolled back due to a previous exception during flush. To begin a new transaction with this Session, first issue Session.rollback(). Original exception was: (raised as a result of Query-invoked autoflush; consider using a session.no_autoflush block if this flush is occurring prematurely)
(sqlite3.InterfaceError) Error binding parameter 1 - probably unsupported type.
[SQL: UPDATE settings SET config_google_drive_folder=?, config_google_drive_watch_changes_response=? WHERE settings.id = ?]
[parameters: ('Google Drive Folder Name I Am Not Using', {'kind': 'api#channel', 'id': 'idnumberhere', 'resourceId': 'resourceidhere', 'resourceUri': 'https://www.googlea ... (162 characters truncated) ... se&maxResults=100&supportsAllDrives=false&supportsTeamDrives=false&alt=json', 'token': 'target=calibreweb-watch_files', 'expiration': '1589870595000'}, 1)]
(Background on this error at: http://sqlalche.me/e/rvf5) (Background on this error at: http://sqlalche.me/e/7s2a)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/gevent/pywsgi.py", line 968, in handle_one_response
self.run_application()
File "/usr/local/lib/python3.6/dist-packages/gevent/pywsgi.py", line 915, in run_application
self.result = self.application(self.environ, self.start_response)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2464, in __call__
return self.wsgi_app(environ, start_response)
File "/app/calibre-web/cps/reverseproxy.py", line 80, in __call__
return self.app(environ, start_response)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2450, in wsgi_app
response = self.handle_exception(e)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1879, in handle_exception
server_error = handler(server_error)
File "/app/calibre-web/cps/web.py", line 107, in internal_error
instance=config.config_calibre_web_title
File "/usr/local/lib/python3.6/dist-packages/flask/templating.py", line 136, in render_template
ctx.app.update_template_context(context)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 838, in update_template_context
context.update(func())
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 379, in _user_context_processor
return dict(current_user=_get_user())
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 346, in _get_user
current_app.login_manager._load_user()
File "/usr/local/lib/python3.6/dist-packages/flask_login/login_manager.py", line 318, in _load_user
user = self._user_callback(user_id)
File "/app/calibre-web/cps/web.py", line 138, in load_user
return ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3300, in first
ret = list(self[0:1])
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3078, in __getitem__
return list(res)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3405, in __iter__
return self._execute_and_instances(context)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3427, in _execute_and_instances
querycontext, self._connection_from_session, close_with_result=True
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3442, in _get_bind_args
mapper=self._bind_mapper(), clause=querycontext.statement, **kw
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3420, in _connection_from_session
conn = self.session.connection(**kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1133, in connection
execution_options=execution_options,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1139, in _connection_for_bind
engine, execution_options
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 408, in _connection_for_bind
self._assert_active()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 295, in _assert_active
code="7s2a",
sqlalchemy.exc.InvalidRequestError: This Session's transaction has been rolled back due to a previous exception during flush. To begin a new transaction with this Session, first issue Session.rollback(). Original exception was: (raised as a result of Query-invoked autoflush; consider using a session.no_autoflush block if this flush is occurring prematurely)
(sqlite3.InterfaceError) Error binding parameter 1 - probably unsupported type.
[SQL: UPDATE settings SET config_google_drive_folder=?, config_google_drive_watch_changes_response=? WHERE settings.id = ?]
[parameters: ('Google Drive Folder Name I Am Not Using', {'kind': 'api#channel', 'id': 'idnumberhere', 'resourceId': 'resourceidhere', 'resourceUri': 'https://www.googlea ... (162 characters truncated) ... se&maxResults=100&supportsAllDrives=false&supportsTeamDrives=false&alt=json', 'token': 'target=calibreweb-watch_files', 'expiration': '1589870595000'}, 1)]
(Background on this error at: http://sqlalche.me/e/rvf5) (Background on this error at: http://sqlalche.me/e/7s2a)
2020-05-12T06:43:24Z {'REMOTE_ADDR': '::ffff:192.168.2.7', 'REMOTE_PORT': '51870', 'HTTP_HOST': 'calibre.net-slum.org', (hidden keys: 32)} failed with InvalidRequestError
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.6/dist-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1948, in full_dispatch_request
rv = self.preprocess_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2242, in preprocess_request
rv = func()
File "/app/calibre-web/cps/web.py", line 293, in before_request
or_(ub.Shelf.is_public == 1, ub.Shelf.user_id == current_user.id)).order_by(ub.Shelf.name).all()
File "/usr/local/lib/python3.6/dist-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
File "/usr/local/lib/python3.6/dist-packages/werkzeug/local.py", line 306, in _get_current_object
return self.__local()
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 26, in <lambda>
current_user = LocalProxy(lambda: _get_user())
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 346, in _get_user
current_app.login_manager._load_user()
File "/usr/local/lib/python3.6/dist-packages/flask_login/login_manager.py", line 318, in _load_user
user = self._user_callback(user_id)
File "/app/calibre-web/cps/web.py", line 138, in load_user
return ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3300, in first
ret = list(self[0:1])
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3078, in __getitem__
return list(res)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3405, in __iter__
return self._execute_and_instances(context)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3427, in _execute_and_instances
querycontext, self._connection_from_session, close_with_result=True
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3442, in _get_bind_args
mapper=self._bind_mapper(), clause=querycontext.statement, **kw
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3420, in _connection_from_session
conn = self.session.connection(**kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1133, in connection
execution_options=execution_options,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1139, in _connection_for_bind
engine, execution_options
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 408, in _connection_for_bind
self._assert_active()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 295, in _assert_active
code="7s2a",
sqlalchemy.exc.InvalidRequestError: This Session's transaction has been rolled back due to a previous exception during flush. To begin a new transaction with this Session, first issue Session.rollback(). Original exception was: (raised as a result of Query-invoked autoflush; consider using a session.no_autoflush block if this flush is occurring prematurely)
(sqlite3.InterfaceError) Error binding parameter 1 - probably unsupported type.
[SQL: UPDATE settings SET config_google_drive_folder=?, config_google_drive_watch_changes_response=? WHERE settings.id = ?]
[parameters: ('Google Drive Folder Name I Am Not Using', {'kind': 'api#channel', 'id': 'idnumberhere', 'resourceId': 'resourceidhere', 'resourceUri': 'https://www.googlea ... (162 characters truncated) ... se&maxResults=100&supportsAllDrives=false&supportsTeamDrives=false&alt=json', 'token': 'target=calibreweb-watch_files', 'expiration': '1589870595000'}, 1)]
(Background on this error at: http://sqlalche.me/e/rvf5) (Background on this error at: http://sqlalche.me/e/7s2a)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/gevent/pywsgi.py", line 968, in handle_one_response
self.run_application()
File "/usr/local/lib/python3.6/dist-packages/gevent/pywsgi.py", line 915, in run_application
self.result = self.application(self.environ, self.start_response)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2464, in __call__
return self.wsgi_app(environ, start_response)
File "/app/calibre-web/cps/reverseproxy.py", line 80, in __call__
return self.app(environ, start_response)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2450, in wsgi_app
response = self.handle_exception(e)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1879, in handle_exception
server_error = handler(server_error)
File "/app/calibre-web/cps/web.py", line 107, in internal_error
instance=config.config_calibre_web_title
File "/usr/local/lib/python3.6/dist-packages/flask/templating.py", line 136, in render_template
ctx.app.update_template_context(context)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 838, in update_template_context
context.update(func())
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 379, in _user_context_processor
return dict(current_user=_get_user())
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 346, in _get_user
current_app.login_manager._load_user()
File "/usr/local/lib/python3.6/dist-packages/flask_login/login_manager.py", line 318, in _load_user
user = self._user_callback(user_id)
File "/app/calibre-web/cps/web.py", line 138, in load_user
return ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3300, in first
ret = list(self[0:1])
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3078, in __getitem__
return list(res)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3405, in __iter__
return self._execute_and_instances(context)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3427, in _execute_and_instances
querycontext, self._connection_from_session, close_with_result=True
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3442, in _get_bind_args
mapper=self._bind_mapper(), clause=querycontext.statement, **kw
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3420, in _connection_from_session
conn = self.session.connection(**kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1133, in connection
execution_options=execution_options,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1139, in _connection_for_bind
engine, execution_options
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 408, in _connection_for_bind
self._assert_active()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 295, in _assert_active
code="7s2a",
sqlalchemy.exc.InvalidRequestError: This Session's transaction has been rolled back due to a previous exception during flush. To begin a new transaction with this Session, first issue Session.rollback(). Original exception was: (raised as a result of Query-invoked autoflush; consider using a session.no_autoflush block if this flush is occurring prematurely)
(sqlite3.InterfaceError) Error binding parameter 1 - probably unsupported type.
[SQL: UPDATE settings SET config_google_drive_folder=?, config_google_drive_watch_changes_response=? WHERE settings.id = ?]
[parameters: ('Google Drive Folder Name I Am Not Using', {'kind': 'api#channel', 'id': 'idnumberhere', 'resourceId': 'resourceidhere', 'resourceUri': 'https://www.googlea ... (162 characters truncated) ... se&maxResults=100&supportsAllDrives=false&supportsTeamDrives=false&alt=json', 'token': 'target=calibreweb-watch_files', 'expiration': '1589870595000'}, 1)]
(Background on this error at: http://sqlalche.me/e/rvf5) (Background on this error at: http://sqlalche.me/e/7s2a)
2020-05-12T06:43:26Z {'REMOTE_ADDR': '::ffff:192.168.2.7', 'REMOTE_PORT': '51862', 'HTTP_HOST': 'calibre.net-slum.org', (hidden keys: 33)} failed with InvalidRequestError
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.6/dist-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1948, in full_dispatch_request
rv = self.preprocess_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2242, in preprocess_request
rv = func()
File "/app/calibre-web/cps/web.py", line 293, in before_request
or_(ub.Shelf.is_public == 1, ub.Shelf.user_id == current_user.id)).order_by(ub.Shelf.name).all()
File "/usr/local/lib/python3.6/dist-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
File "/usr/local/lib/python3.6/dist-packages/werkzeug/local.py", line 306, in _get_current_object
return self.__local()
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 26, in <lambda>
current_user = LocalProxy(lambda: _get_user())
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 346, in _get_user
current_app.login_manager._load_user()
File "/usr/local/lib/python3.6/dist-packages/flask_login/login_manager.py", line 318, in _load_user
user = self._user_callback(user_id)
File "/app/calibre-web/cps/web.py", line 138, in load_user
return ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3300, in first
ret = list(self[0:1])
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3078, in __getitem__
return list(res)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3405, in __iter__
return self._execute_and_instances(context)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3427, in _execute_and_instances
querycontext, self._connection_from_session, close_with_result=True
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3442, in _get_bind_args
mapper=self._bind_mapper(), clause=querycontext.statement, **kw
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3420, in _connection_from_session
conn = self.session.connection(**kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1133, in connection
execution_options=execution_options,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1139, in _connection_for_bind
engine, execution_options
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 408, in _connection_for_bind
self._assert_active()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 295, in _assert_active
code="7s2a",
sqlalchemy.exc.InvalidRequestError: This Session's transaction has been rolled back due to a previous exception during flush. To begin a new transaction with this Session, first issue Session.rollback(). Original exception was: (raised as a result of Query-invoked autoflush; consider using a session.no_autoflush block if this flush is occurring prematurely)
(sqlite3.InterfaceError) Error binding parameter 1 - probably unsupported type.
[SQL: UPDATE settings SET config_google_drive_folder=?, config_google_drive_watch_changes_response=? WHERE settings.id = ?]
[parameters: ('Google Drive Folder Name I Am Not Using', {'kind': 'api#channel', 'id': 'idnumberhere', 'resourceId': 'resourceidhere', 'resourceUri': 'https://www.googlea ... (162 characters truncated) ... se&maxResults=100&supportsAllDrives=false&supportsTeamDrives=false&alt=json', 'token': 'target=calibreweb-watch_files', 'expiration': '1589870595000'}, 1)]
(Background on this error at: http://sqlalche.me/e/rvf5) (Background on this error at: http://sqlalche.me/e/7s2a)
|
sqlite3.InterfaceError
|
def save(self):
"""Apply all configuration values to the underlying storage."""
s = self._read_from_storage() # type: _Settings
if self.config_google_drive_watch_changes_response:
self.config_google_drive_watch_changes_response = json.dumps(
self.config_google_drive_watch_changes_response
)
for k, v in self.__dict__.items():
if k[0] == "_":
continue
if hasattr(s, k):
setattr(s, k, v)
log.debug("_ConfigSQL updating storage")
self._session.merge(s)
self._session.commit()
self.load()
|
def save(self):
"""Apply all configuration values to the underlying storage."""
s = self._read_from_storage() # type: _Settings
for k, v in self.__dict__.items():
if k[0] == "_":
continue
if hasattr(s, k):
setattr(s, k, v)
log.debug("_ConfigSQL updating storage")
self._session.merge(s)
self._session.commit()
self.load()
|
https://github.com/janeczku/calibre-web/issues/1387
|
[services.d] starting services
[services.d] done.
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py", line 1248, in _execute_context
cursor, statement, parameters, context
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/default.py", line 590, in do_execute
cursor.execute(statement, parameters)
sqlite3.InterfaceError: Error binding parameter 1 - probably unsupported type.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.6/dist-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 272, in decorated_view
return func(*args, **kwargs)
File "/app/calibre-web/cps/web.py", line 213, in inner
return f(*args, **kwargs)
File "/app/calibre-web/cps/admin.py", line 142, in configuration
return _configuration_update_helper()
File "/app/calibre-web/cps/admin.py", line 673, in _configuration_update_helper
config.save()
File "/app/calibre-web/cps/config_sql.py", line 289, in save
self._session.merge(s)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 2117, in merge
self._autoflush()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1602, in _autoflush
util.raise_(e, with_traceback=sys.exc_info()[2])
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/compat.py", line 178, in raise_
raise exception
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1591, in _autoflush
self.flush()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 2496, in flush
self._flush(objects)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 2637, in _flush
transaction.rollback(_capture_exception=True)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/langhelpers.py", line 69, in __exit__
exc_value, with_traceback=exc_tb,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/compat.py", line 178, in raise_
raise exception
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 2597, in _flush
flush_context.execute()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/unitofwork.py", line 422, in execute
rec.execute(self)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/unitofwork.py", line 589, in execute
uow,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/persistence.py", line 236, in save_obj
update,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/persistence.py", line 995, in _emit_update_statements
statement, multiparams
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py", line 984, in execute
return meth(self, multiparams, params)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/elements.py", line 293, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py", line 1103, in _execute_clauseelement
distilled_params,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py", line 1288, in _execute_context
e, statement, parameters, cursor, context
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py", line 1482, in _handle_dbapi_exception
sqlalchemy_exception, with_traceback=exc_info[2], from_=e
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/compat.py", line 178, in raise_
raise exception
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py", line 1248, in _execute_context
cursor, statement, parameters, context
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/default.py", line 590, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.InterfaceError: (raised as a result of Query-invoked autoflush; consider using a session.no_autoflush block if this flush is occurring prematurely)
(sqlite3.InterfaceError) Error binding parameter 1 - probably unsupported type.
[SQL: UPDATE settings SET config_google_drive_folder=?, config_google_drive_watch_changes_response=? WHERE settings.id = ?]
[parameters: ('Google Drive Folder Name I Am Not Using', {'kind': 'api#channel', 'id': 'idnumberhere', 'resourceId': 'resourceidhere', 'resourceUri': 'https://www.googlea ... (162 characters truncated) ... se&maxResults=100&supportsAllDrives=false&supportsTeamDrives=false&alt=json', 'token': 'target=calibreweb-watch_files', 'expiration': '1589870595000'}, 1)]
(Background on this error at: http://sqlalche.me/e/rvf5)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/gevent/pywsgi.py", line 968, in handle_one_response
self.run_application()
File "/usr/local/lib/python3.6/dist-packages/gevent/pywsgi.py", line 915, in run_application
self.result = self.application(self.environ, self.start_response)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2464, in __call__
return self.wsgi_app(environ, start_response)
File "/app/calibre-web/cps/reverseproxy.py", line 80, in __call__
return self.app(environ, start_response)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2450, in wsgi_app
response = self.handle_exception(e)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1879, in handle_exception
server_error = handler(server_error)
File "/app/calibre-web/cps/web.py", line 107, in internal_error
instance=config.config_calibre_web_title
File "/usr/local/lib/python3.6/dist-packages/flask/templating.py", line 140, in render_template
ctx.app,
File "/usr/local/lib/python3.6/dist-packages/flask/templating.py", line 120, in _render
rv = template.render(context)
File "/usr/local/lib/python3.6/dist-packages/jinja2/environment.py", line 1090, in render
self.environment.handle_exception()
File "/usr/local/lib/python3.6/dist-packages/jinja2/environment.py", line 832, in handle_exception
reraise(*rewrite_traceback_stack(source=source))
File "/usr/local/lib/python3.6/dist-packages/jinja2/_compat.py", line 28, in reraise
raise value.with_traceback(tb)
File "/app/calibre-web/cps/templates/http_error.html", line 2, in top-level template code
<html class="http-error" lang="{{ g.user.locale }}">
File "/usr/local/lib/python3.6/dist-packages/jinja2/environment.py", line 471, in getattr
return getattr(obj, attribute)
File "/usr/local/lib/python3.6/dist-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/attributes.py", line 286, in __get__
return self.impl.get(instance_state(instance), dict_)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/attributes.py", line 717, in get
value = state._load_expired(state, passive)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/state.py", line 652, in _load_expired
self.manager.deferred_scalar_loader(self, toload)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/loading.py", line 1012, in load_scalar_attributes
only_load_props=attribute_names,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/loading.py", line 207, in load_on_ident
identity_token=identity_token,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/loading.py", line 287, in load_on_pk_identity
return q.one()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3360, in one
ret = self.one_or_none()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3329, in one_or_none
ret = list(self)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3405, in __iter__
return self._execute_and_instances(context)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3427, in _execute_and_instances
querycontext, self._connection_from_session, close_with_result=True
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3442, in _get_bind_args
mapper=self._bind_mapper(), clause=querycontext.statement, **kw
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3420, in _connection_from_session
conn = self.session.connection(**kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1133, in connection
execution_options=execution_options,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1139, in _connection_for_bind
engine, execution_options
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 408, in _connection_for_bind
self._assert_active()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 295, in _assert_active
code="7s2a",
sqlalchemy.exc.InvalidRequestError: This Session's transaction has been rolled back due to a previous exception during flush. To begin a new transaction with this Session, first issue Session.rollback(). Original exception was: (raised as a result of Query-invoked autoflush; consider using a session.no_autoflush block if this flush is occurring prematurely)
(sqlite3.InterfaceError) Error binding parameter 1 - probably unsupported type.
[SQL: UPDATE settings SET config_google_drive_folder=?, config_google_drive_watch_changes_response=? WHERE settings.id = ?]
[parameters: ('Google Drive Folder Name I Am Not Using', {'kind': 'api#channel', 'id': 'idnumberhere', 'resourceId': 'resourceidhere', 'resourceUri': 'https://www.googlea ... (162 characters truncated) ... se&maxResults=100&supportsAllDrives=false&supportsTeamDrives=false&alt=json', 'token': 'target=calibreweb-watch_files', 'expiration': '1589870595000'}, 1)]
(Background on this error at: http://sqlalche.me/e/rvf5) (Background on this error at: http://sqlalche.me/e/7s2a)
2020-05-12T06:43:20Z {'REMOTE_ADDR': '::ffff:192.168.2.7', 'REMOTE_PORT': '51876', 'HTTP_HOST': 'calibre.net-slum.org', (hidden keys: 38)} failed with InvalidRequestError
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.6/dist-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1948, in full_dispatch_request
rv = self.preprocess_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2242, in preprocess_request
rv = func()
File "/app/calibre-web/cps/web.py", line 293, in before_request
or_(ub.Shelf.is_public == 1, ub.Shelf.user_id == current_user.id)).order_by(ub.Shelf.name).all()
File "/usr/local/lib/python3.6/dist-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
File "/usr/local/lib/python3.6/dist-packages/werkzeug/local.py", line 306, in _get_current_object
return self.__local()
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 26, in <lambda>
current_user = LocalProxy(lambda: _get_user())
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 346, in _get_user
current_app.login_manager._load_user()
File "/usr/local/lib/python3.6/dist-packages/flask_login/login_manager.py", line 318, in _load_user
user = self._user_callback(user_id)
File "/app/calibre-web/cps/web.py", line 138, in load_user
return ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3300, in first
ret = list(self[0:1])
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3078, in __getitem__
return list(res)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3405, in __iter__
return self._execute_and_instances(context)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3427, in _execute_and_instances
querycontext, self._connection_from_session, close_with_result=True
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3442, in _get_bind_args
mapper=self._bind_mapper(), clause=querycontext.statement, **kw
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3420, in _connection_from_session
conn = self.session.connection(**kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1133, in connection
execution_options=execution_options,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1139, in _connection_for_bind
engine, execution_options
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 408, in _connection_for_bind
self._assert_active()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 295, in _assert_active
code="7s2a",
sqlalchemy.exc.InvalidRequestError: This Session's transaction has been rolled back due to a previous exception during flush. To begin a new transaction with this Session, first issue Session.rollback(). Original exception was: (raised as a result of Query-invoked autoflush; consider using a session.no_autoflush block if this flush is occurring prematurely)
(sqlite3.InterfaceError) Error binding parameter 1 - probably unsupported type.
[SQL: UPDATE settings SET config_google_drive_folder=?, config_google_drive_watch_changes_response=? WHERE settings.id = ?]
[parameters: ('Google Drive Folder Name I Am Not Using', {'kind': 'api#channel', 'id': 'idnumberhere', 'resourceId': 'resourceidhere', 'resourceUri': 'https://www.googlea ... (162 characters truncated) ... se&maxResults=100&supportsAllDrives=false&supportsTeamDrives=false&alt=json', 'token': 'target=calibreweb-watch_files', 'expiration': '1589870595000'}, 1)]
(Background on this error at: http://sqlalche.me/e/rvf5) (Background on this error at: http://sqlalche.me/e/7s2a)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/gevent/pywsgi.py", line 968, in handle_one_response
self.run_application()
File "/usr/local/lib/python3.6/dist-packages/gevent/pywsgi.py", line 915, in run_application
self.result = self.application(self.environ, self.start_response)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2464, in __call__
return self.wsgi_app(environ, start_response)
File "/app/calibre-web/cps/reverseproxy.py", line 80, in __call__
return self.app(environ, start_response)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2450, in wsgi_app
response = self.handle_exception(e)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1879, in handle_exception
server_error = handler(server_error)
File "/app/calibre-web/cps/web.py", line 107, in internal_error
instance=config.config_calibre_web_title
File "/usr/local/lib/python3.6/dist-packages/flask/templating.py", line 136, in render_template
ctx.app.update_template_context(context)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 838, in update_template_context
context.update(func())
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 379, in _user_context_processor
return dict(current_user=_get_user())
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 346, in _get_user
current_app.login_manager._load_user()
File "/usr/local/lib/python3.6/dist-packages/flask_login/login_manager.py", line 318, in _load_user
user = self._user_callback(user_id)
File "/app/calibre-web/cps/web.py", line 138, in load_user
return ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3300, in first
ret = list(self[0:1])
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3078, in __getitem__
return list(res)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3405, in __iter__
return self._execute_and_instances(context)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3427, in _execute_and_instances
querycontext, self._connection_from_session, close_with_result=True
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3442, in _get_bind_args
mapper=self._bind_mapper(), clause=querycontext.statement, **kw
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3420, in _connection_from_session
conn = self.session.connection(**kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1133, in connection
execution_options=execution_options,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1139, in _connection_for_bind
engine, execution_options
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 408, in _connection_for_bind
self._assert_active()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 295, in _assert_active
code="7s2a",
sqlalchemy.exc.InvalidRequestError: This Session's transaction has been rolled back due to a previous exception during flush. To begin a new transaction with this Session, first issue Session.rollback(). Original exception was: (raised as a result of Query-invoked autoflush; consider using a session.no_autoflush block if this flush is occurring prematurely)
(sqlite3.InterfaceError) Error binding parameter 1 - probably unsupported type.
[SQL: UPDATE settings SET config_google_drive_folder=?, config_google_drive_watch_changes_response=? WHERE settings.id = ?]
[parameters: ('Google Drive Folder Name I Am Not Using', {'kind': 'api#channel', 'id': 'idnumberhere', 'resourceId': 'resourceidhere', 'resourceUri': 'https://www.googlea ... (162 characters truncated) ... se&maxResults=100&supportsAllDrives=false&supportsTeamDrives=false&alt=json', 'token': 'target=calibreweb-watch_files', 'expiration': '1589870595000'}, 1)]
(Background on this error at: http://sqlalche.me/e/rvf5) (Background on this error at: http://sqlalche.me/e/7s2a)
2020-05-12T06:43:20Z {'REMOTE_ADDR': '::ffff:192.168.2.7', 'REMOTE_PORT': '51860', 'HTTP_HOST': 'calibre.net-slum.org', (hidden keys: 32)} failed with InvalidRequestError
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.6/dist-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1948, in full_dispatch_request
rv = self.preprocess_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2242, in preprocess_request
rv = func()
File "/app/calibre-web/cps/web.py", line 293, in before_request
or_(ub.Shelf.is_public == 1, ub.Shelf.user_id == current_user.id)).order_by(ub.Shelf.name).all()
File "/usr/local/lib/python3.6/dist-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
File "/usr/local/lib/python3.6/dist-packages/werkzeug/local.py", line 306, in _get_current_object
return self.__local()
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 26, in <lambda>
current_user = LocalProxy(lambda: _get_user())
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 346, in _get_user
current_app.login_manager._load_user()
File "/usr/local/lib/python3.6/dist-packages/flask_login/login_manager.py", line 318, in _load_user
user = self._user_callback(user_id)
File "/app/calibre-web/cps/web.py", line 138, in load_user
return ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3300, in first
ret = list(self[0:1])
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3078, in __getitem__
return list(res)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3405, in __iter__
return self._execute_and_instances(context)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3427, in _execute_and_instances
querycontext, self._connection_from_session, close_with_result=True
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3442, in _get_bind_args
mapper=self._bind_mapper(), clause=querycontext.statement, **kw
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3420, in _connection_from_session
conn = self.session.connection(**kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1133, in connection
execution_options=execution_options,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1139, in _connection_for_bind
engine, execution_options
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 408, in _connection_for_bind
self._assert_active()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 295, in _assert_active
code="7s2a",
sqlalchemy.exc.InvalidRequestError: This Session's transaction has been rolled back due to a previous exception during flush. To begin a new transaction with this Session, first issue Session.rollback(). Original exception was: (raised as a result of Query-invoked autoflush; consider using a session.no_autoflush block if this flush is occurring prematurely)
(sqlite3.InterfaceError) Error binding parameter 1 - probably unsupported type.
[SQL: UPDATE settings SET config_google_drive_folder=?, config_google_drive_watch_changes_response=? WHERE settings.id = ?]
[parameters: ('Google Drive Folder Name I Am Not Using', {'kind': 'api#channel', 'id': 'idnumberhere', 'resourceId': 'resourceidhere', 'resourceUri': 'https://www.googlea ... (162 characters truncated) ... se&maxResults=100&supportsAllDrives=false&supportsTeamDrives=false&alt=json', 'token': 'target=calibreweb-watch_files', 'expiration': '1589870595000'}, 1)]
(Background on this error at: http://sqlalche.me/e/rvf5) (Background on this error at: http://sqlalche.me/e/7s2a)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/gevent/pywsgi.py", line 968, in handle_one_response
self.run_application()
File "/usr/local/lib/python3.6/dist-packages/gevent/pywsgi.py", line 915, in run_application
self.result = self.application(self.environ, self.start_response)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2464, in __call__
return self.wsgi_app(environ, start_response)
File "/app/calibre-web/cps/reverseproxy.py", line 80, in __call__
return self.app(environ, start_response)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2450, in wsgi_app
response = self.handle_exception(e)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1879, in handle_exception
server_error = handler(server_error)
File "/app/calibre-web/cps/web.py", line 107, in internal_error
instance=config.config_calibre_web_title
File "/usr/local/lib/python3.6/dist-packages/flask/templating.py", line 136, in render_template
ctx.app.update_template_context(context)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 838, in update_template_context
context.update(func())
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 379, in _user_context_processor
return dict(current_user=_get_user())
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 346, in _get_user
current_app.login_manager._load_user()
File "/usr/local/lib/python3.6/dist-packages/flask_login/login_manager.py", line 318, in _load_user
user = self._user_callback(user_id)
File "/app/calibre-web/cps/web.py", line 138, in load_user
return ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3300, in first
ret = list(self[0:1])
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3078, in __getitem__
return list(res)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3405, in __iter__
return self._execute_and_instances(context)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3427, in _execute_and_instances
querycontext, self._connection_from_session, close_with_result=True
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3442, in _get_bind_args
mapper=self._bind_mapper(), clause=querycontext.statement, **kw
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3420, in _connection_from_session
conn = self.session.connection(**kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1133, in connection
execution_options=execution_options,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1139, in _connection_for_bind
engine, execution_options
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 408, in _connection_for_bind
self._assert_active()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 295, in _assert_active
code="7s2a",
sqlalchemy.exc.InvalidRequestError: This Session's transaction has been rolled back due to a previous exception during flush. To begin a new transaction with this Session, first issue Session.rollback(). Original exception was: (raised as a result of Query-invoked autoflush; consider using a session.no_autoflush block if this flush is occurring prematurely)
(sqlite3.InterfaceError) Error binding parameter 1 - probably unsupported type.
[SQL: UPDATE settings SET config_google_drive_folder=?, config_google_drive_watch_changes_response=? WHERE settings.id = ?]
[parameters: ('Google Drive Folder Name I Am Not Using', {'kind': 'api#channel', 'id': 'idnumberhere', 'resourceId': 'resourceidhere', 'resourceUri': 'https://www.googlea ... (162 characters truncated) ... se&maxResults=100&supportsAllDrives=false&supportsTeamDrives=false&alt=json', 'token': 'target=calibreweb-watch_files', 'expiration': '1589870595000'}, 1)]
(Background on this error at: http://sqlalche.me/e/rvf5) (Background on this error at: http://sqlalche.me/e/7s2a)
2020-05-12T06:43:24Z {'REMOTE_ADDR': '::ffff:192.168.2.7', 'REMOTE_PORT': '51880', 'HTTP_HOST': 'calibre.net-slum.org', (hidden keys: 33)} failed with InvalidRequestError
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.6/dist-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1948, in full_dispatch_request
rv = self.preprocess_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2242, in preprocess_request
rv = func()
File "/app/calibre-web/cps/web.py", line 293, in before_request
or_(ub.Shelf.is_public == 1, ub.Shelf.user_id == current_user.id)).order_by(ub.Shelf.name).all()
File "/usr/local/lib/python3.6/dist-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
File "/usr/local/lib/python3.6/dist-packages/werkzeug/local.py", line 306, in _get_current_object
return self.__local()
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 26, in <lambda>
current_user = LocalProxy(lambda: _get_user())
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 346, in _get_user
current_app.login_manager._load_user()
File "/usr/local/lib/python3.6/dist-packages/flask_login/login_manager.py", line 318, in _load_user
user = self._user_callback(user_id)
File "/app/calibre-web/cps/web.py", line 138, in load_user
return ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3300, in first
ret = list(self[0:1])
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3078, in __getitem__
return list(res)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3405, in __iter__
return self._execute_and_instances(context)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3427, in _execute_and_instances
querycontext, self._connection_from_session, close_with_result=True
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3442, in _get_bind_args
mapper=self._bind_mapper(), clause=querycontext.statement, **kw
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3420, in _connection_from_session
conn = self.session.connection(**kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1133, in connection
execution_options=execution_options,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1139, in _connection_for_bind
engine, execution_options
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 408, in _connection_for_bind
self._assert_active()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 295, in _assert_active
code="7s2a",
sqlalchemy.exc.InvalidRequestError: This Session's transaction has been rolled back due to a previous exception during flush. To begin a new transaction with this Session, first issue Session.rollback(). Original exception was: (raised as a result of Query-invoked autoflush; consider using a session.no_autoflush block if this flush is occurring prematurely)
(sqlite3.InterfaceError) Error binding parameter 1 - probably unsupported type.
[SQL: UPDATE settings SET config_google_drive_folder=?, config_google_drive_watch_changes_response=? WHERE settings.id = ?]
[parameters: ('Google Drive Folder Name I Am Not Using', {'kind': 'api#channel', 'id': 'idnumberhere', 'resourceId': 'resourceidhere', 'resourceUri': 'https://www.googlea ... (162 characters truncated) ... se&maxResults=100&supportsAllDrives=false&supportsTeamDrives=false&alt=json', 'token': 'target=calibreweb-watch_files', 'expiration': '1589870595000'}, 1)]
(Background on this error at: http://sqlalche.me/e/rvf5) (Background on this error at: http://sqlalche.me/e/7s2a)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/gevent/pywsgi.py", line 968, in handle_one_response
self.run_application()
File "/usr/local/lib/python3.6/dist-packages/gevent/pywsgi.py", line 915, in run_application
self.result = self.application(self.environ, self.start_response)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2464, in __call__
return self.wsgi_app(environ, start_response)
File "/app/calibre-web/cps/reverseproxy.py", line 80, in __call__
return self.app(environ, start_response)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2450, in wsgi_app
response = self.handle_exception(e)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1879, in handle_exception
server_error = handler(server_error)
File "/app/calibre-web/cps/web.py", line 107, in internal_error
instance=config.config_calibre_web_title
File "/usr/local/lib/python3.6/dist-packages/flask/templating.py", line 136, in render_template
ctx.app.update_template_context(context)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 838, in update_template_context
context.update(func())
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 379, in _user_context_processor
return dict(current_user=_get_user())
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 346, in _get_user
current_app.login_manager._load_user()
File "/usr/local/lib/python3.6/dist-packages/flask_login/login_manager.py", line 318, in _load_user
user = self._user_callback(user_id)
File "/app/calibre-web/cps/web.py", line 138, in load_user
return ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3300, in first
ret = list(self[0:1])
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3078, in __getitem__
return list(res)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3405, in __iter__
return self._execute_and_instances(context)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3427, in _execute_and_instances
querycontext, self._connection_from_session, close_with_result=True
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3442, in _get_bind_args
mapper=self._bind_mapper(), clause=querycontext.statement, **kw
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3420, in _connection_from_session
conn = self.session.connection(**kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1133, in connection
execution_options=execution_options,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1139, in _connection_for_bind
engine, execution_options
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 408, in _connection_for_bind
self._assert_active()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 295, in _assert_active
code="7s2a",
sqlalchemy.exc.InvalidRequestError: This Session's transaction has been rolled back due to a previous exception during flush. To begin a new transaction with this Session, first issue Session.rollback(). Original exception was: (raised as a result of Query-invoked autoflush; consider using a session.no_autoflush block if this flush is occurring prematurely)
(sqlite3.InterfaceError) Error binding parameter 1 - probably unsupported type.
[SQL: UPDATE settings SET config_google_drive_folder=?, config_google_drive_watch_changes_response=? WHERE settings.id = ?]
[parameters: ('Google Drive Folder Name I Am Not Using', {'kind': 'api#channel', 'id': 'idnumberhere', 'resourceId': 'resourceidhere', 'resourceUri': 'https://www.googlea ... (162 characters truncated) ... se&maxResults=100&supportsAllDrives=false&supportsTeamDrives=false&alt=json', 'token': 'target=calibreweb-watch_files', 'expiration': '1589870595000'}, 1)]
(Background on this error at: http://sqlalche.me/e/rvf5) (Background on this error at: http://sqlalche.me/e/7s2a)
2020-05-12T06:43:24Z {'REMOTE_ADDR': '::ffff:192.168.2.7', 'REMOTE_PORT': '51870', 'HTTP_HOST': 'calibre.net-slum.org', (hidden keys: 32)} failed with InvalidRequestError
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.6/dist-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1948, in full_dispatch_request
rv = self.preprocess_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2242, in preprocess_request
rv = func()
File "/app/calibre-web/cps/web.py", line 293, in before_request
or_(ub.Shelf.is_public == 1, ub.Shelf.user_id == current_user.id)).order_by(ub.Shelf.name).all()
File "/usr/local/lib/python3.6/dist-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
File "/usr/local/lib/python3.6/dist-packages/werkzeug/local.py", line 306, in _get_current_object
return self.__local()
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 26, in <lambda>
current_user = LocalProxy(lambda: _get_user())
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 346, in _get_user
current_app.login_manager._load_user()
File "/usr/local/lib/python3.6/dist-packages/flask_login/login_manager.py", line 318, in _load_user
user = self._user_callback(user_id)
File "/app/calibre-web/cps/web.py", line 138, in load_user
return ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3300, in first
ret = list(self[0:1])
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3078, in __getitem__
return list(res)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3405, in __iter__
return self._execute_and_instances(context)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3427, in _execute_and_instances
querycontext, self._connection_from_session, close_with_result=True
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3442, in _get_bind_args
mapper=self._bind_mapper(), clause=querycontext.statement, **kw
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3420, in _connection_from_session
conn = self.session.connection(**kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1133, in connection
execution_options=execution_options,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1139, in _connection_for_bind
engine, execution_options
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 408, in _connection_for_bind
self._assert_active()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 295, in _assert_active
code="7s2a",
sqlalchemy.exc.InvalidRequestError: This Session's transaction has been rolled back due to a previous exception during flush. To begin a new transaction with this Session, first issue Session.rollback(). Original exception was: (raised as a result of Query-invoked autoflush; consider using a session.no_autoflush block if this flush is occurring prematurely)
(sqlite3.InterfaceError) Error binding parameter 1 - probably unsupported type.
[SQL: UPDATE settings SET config_google_drive_folder=?, config_google_drive_watch_changes_response=? WHERE settings.id = ?]
[parameters: ('Google Drive Folder Name I Am Not Using', {'kind': 'api#channel', 'id': 'idnumberhere', 'resourceId': 'resourceidhere', 'resourceUri': 'https://www.googlea ... (162 characters truncated) ... se&maxResults=100&supportsAllDrives=false&supportsTeamDrives=false&alt=json', 'token': 'target=calibreweb-watch_files', 'expiration': '1589870595000'}, 1)]
(Background on this error at: http://sqlalche.me/e/rvf5) (Background on this error at: http://sqlalche.me/e/7s2a)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/gevent/pywsgi.py", line 968, in handle_one_response
self.run_application()
File "/usr/local/lib/python3.6/dist-packages/gevent/pywsgi.py", line 915, in run_application
self.result = self.application(self.environ, self.start_response)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2464, in __call__
return self.wsgi_app(environ, start_response)
File "/app/calibre-web/cps/reverseproxy.py", line 80, in __call__
return self.app(environ, start_response)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2450, in wsgi_app
response = self.handle_exception(e)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1879, in handle_exception
server_error = handler(server_error)
File "/app/calibre-web/cps/web.py", line 107, in internal_error
instance=config.config_calibre_web_title
File "/usr/local/lib/python3.6/dist-packages/flask/templating.py", line 136, in render_template
ctx.app.update_template_context(context)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 838, in update_template_context
context.update(func())
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 379, in _user_context_processor
return dict(current_user=_get_user())
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 346, in _get_user
current_app.login_manager._load_user()
File "/usr/local/lib/python3.6/dist-packages/flask_login/login_manager.py", line 318, in _load_user
user = self._user_callback(user_id)
File "/app/calibre-web/cps/web.py", line 138, in load_user
return ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3300, in first
ret = list(self[0:1])
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3078, in __getitem__
return list(res)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3405, in __iter__
return self._execute_and_instances(context)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3427, in _execute_and_instances
querycontext, self._connection_from_session, close_with_result=True
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3442, in _get_bind_args
mapper=self._bind_mapper(), clause=querycontext.statement, **kw
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3420, in _connection_from_session
conn = self.session.connection(**kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1133, in connection
execution_options=execution_options,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1139, in _connection_for_bind
engine, execution_options
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 408, in _connection_for_bind
self._assert_active()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 295, in _assert_active
code="7s2a",
sqlalchemy.exc.InvalidRequestError: This Session's transaction has been rolled back due to a previous exception during flush. To begin a new transaction with this Session, first issue Session.rollback(). Original exception was: (raised as a result of Query-invoked autoflush; consider using a session.no_autoflush block if this flush is occurring prematurely)
(sqlite3.InterfaceError) Error binding parameter 1 - probably unsupported type.
[SQL: UPDATE settings SET config_google_drive_folder=?, config_google_drive_watch_changes_response=? WHERE settings.id = ?]
[parameters: ('Google Drive Folder Name I Am Not Using', {'kind': 'api#channel', 'id': 'idnumberhere', 'resourceId': 'resourceidhere', 'resourceUri': 'https://www.googlea ... (162 characters truncated) ... se&maxResults=100&supportsAllDrives=false&supportsTeamDrives=false&alt=json', 'token': 'target=calibreweb-watch_files', 'expiration': '1589870595000'}, 1)]
(Background on this error at: http://sqlalche.me/e/rvf5) (Background on this error at: http://sqlalche.me/e/7s2a)
2020-05-12T06:43:26Z {'REMOTE_ADDR': '::ffff:192.168.2.7', 'REMOTE_PORT': '51862', 'HTTP_HOST': 'calibre.net-slum.org', (hidden keys: 33)} failed with InvalidRequestError
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.6/dist-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1948, in full_dispatch_request
rv = self.preprocess_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2242, in preprocess_request
rv = func()
File "/app/calibre-web/cps/web.py", line 293, in before_request
or_(ub.Shelf.is_public == 1, ub.Shelf.user_id == current_user.id)).order_by(ub.Shelf.name).all()
File "/usr/local/lib/python3.6/dist-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
File "/usr/local/lib/python3.6/dist-packages/werkzeug/local.py", line 306, in _get_current_object
return self.__local()
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 26, in <lambda>
current_user = LocalProxy(lambda: _get_user())
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 346, in _get_user
current_app.login_manager._load_user()
File "/usr/local/lib/python3.6/dist-packages/flask_login/login_manager.py", line 318, in _load_user
user = self._user_callback(user_id)
File "/app/calibre-web/cps/web.py", line 138, in load_user
return ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3300, in first
ret = list(self[0:1])
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3078, in __getitem__
return list(res)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3405, in __iter__
return self._execute_and_instances(context)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3427, in _execute_and_instances
querycontext, self._connection_from_session, close_with_result=True
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3442, in _get_bind_args
mapper=self._bind_mapper(), clause=querycontext.statement, **kw
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py", line 3420, in _connection_from_session
conn = self.session.connection(**kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1133, in connection
execution_options=execution_options,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 1139, in _connection_for_bind
engine, execution_options
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 408, in _connection_for_bind
self._assert_active()
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py", line 295, in _assert_active
code="7s2a",
sqlalchemy.exc.InvalidRequestError: This Session's transaction has been rolled back due to a previous exception during flush. To begin a new transaction with this Session, first issue Session.rollback(). Original exception was: (raised as a result of Query-invoked autoflush; consider using a session.no_autoflush block if this flush is occurring prematurely)
(sqlite3.InterfaceError) Error binding parameter 1 - probably unsupported type.
[SQL: UPDATE settings SET config_google_drive_folder=?, config_google_drive_watch_changes_response=? WHERE settings.id = ?]
[parameters: ('Google Drive Folder Name I Am Not Using', {'kind': 'api#channel', 'id': 'idnumberhere', 'resourceId': 'resourceidhere', 'resourceUri': 'https://www.googlea ... (162 characters truncated) ... se&maxResults=100&supportsAllDrives=false&supportsTeamDrives=false&alt=json', 'token': 'target=calibreweb-watch_files', 'expiration': '1589870595000'}, 1)]
(Background on this error at: http://sqlalche.me/e/rvf5) (Background on this error at: http://sqlalche.me/e/7s2a)
|
sqlite3.InterfaceError
|
def _configuration_update_helper():
reboot_required = False
db_change = False
to_save = request.form.to_dict()
to_save["config_calibre_dir"] = re.sub(
"[[\\/]metadata\.db$", "", to_save["config_calibre_dir"], flags=re.IGNORECASE
)
db_change |= _config_string(to_save, "config_calibre_dir")
# Google drive setup
gdriveError = _configuration_gdrive_helper(to_save)
reboot_required |= _config_int(to_save, "config_port")
reboot_required |= _config_string(to_save, "config_keyfile")
if config.config_keyfile and not os.path.isfile(config.config_keyfile):
return _configuration_result(
_("Keyfile Location is not Valid, Please Enter Correct Path"), gdriveError
)
reboot_required |= _config_string(to_save, "config_certfile")
if config.config_certfile and not os.path.isfile(config.config_certfile):
return _configuration_result(
_("Certfile Location is not Valid, Please Enter Correct Path"), gdriveError
)
_config_checkbox_int(to_save, "config_uploading")
_config_checkbox_int(to_save, "config_anonbrowse")
_config_checkbox_int(to_save, "config_public_reg")
reboot_required |= _config_checkbox_int(to_save, "config_kobo_sync")
_config_checkbox_int(to_save, "config_kobo_proxy")
_config_string(to_save, "config_upload_formats")
constants.EXTENSIONS_UPLOAD = [
x.lstrip().rstrip() for x in config.config_upload_formats.split(",")
]
_config_string(to_save, "config_calibre")
_config_string(to_save, "config_converterpath")
_config_string(to_save, "config_kepubifypath")
reboot_required |= _config_int(to_save, "config_login_type")
# LDAP configurator,
if config.config_login_type == constants.LOGIN_LDAP:
reboot_required |= _configuration_ldap_helper(to_save, gdriveError)
# Remote login configuration
_config_checkbox(to_save, "config_remote_login")
if not config.config_remote_login:
ub.session.query(ub.RemoteAuthToken).filter(
ub.RemoteAuthToken.token_type == 0
).delete()
# Goodreads configuration
_config_checkbox(to_save, "config_use_goodreads")
_config_string(to_save, "config_goodreads_api_key")
_config_string(to_save, "config_goodreads_api_secret")
if services.goodreads_support:
services.goodreads_support.connect(
config.config_goodreads_api_key,
config.config_goodreads_api_secret,
config.config_use_goodreads,
)
_config_int(to_save, "config_updatechannel")
# Reverse proxy login configuration
_config_checkbox(to_save, "config_allow_reverse_proxy_header_login")
_config_string(to_save, "config_reverse_proxy_login_header_name")
# OAuth configuration
if config.config_login_type == constants.LOGIN_OAUTH:
_configuration_oauth_helper(to_save)
reboot_required |= _configuration_logfile_helper(to_save, gdriveError)
# Rarfile Content configuration
_config_string(to_save, "config_rarfile_location")
unrar_status = helper.check_unrar(config.config_rarfile_location)
if unrar_status:
return _configuration_result(unrar_status, gdriveError)
try:
metadata_db = os.path.join(config.config_calibre_dir, "metadata.db")
if (
config.config_use_google_drive
and is_gdrive_ready()
and not os.path.exists(metadata_db)
):
gdriveutils.downloadFile(None, "metadata.db", metadata_db)
db_change = True
except Exception as e:
return _configuration_result("%s" % e, gdriveError)
if db_change:
if not db.setup_db(config):
return _configuration_result(
_("DB Location is not Valid, Please Enter Correct Path"), gdriveError
)
if not os.access(
os.path.join(config.config_calibre_dir, "metadata.db"), os.W_OK
):
flash(_("DB is not writeable"), category="warning")
config.save()
flash(_("Calibre-Web configuration updated"), category="success")
if reboot_required:
web_server.stop(True)
return _configuration_result(None, gdriveError)
|
def _configuration_update_helper():
reboot_required = False
db_change = False
to_save = request.form.to_dict()
db_change |= _config_string(to_save, "config_calibre_dir")
# Google drive setup
gdriveError = _configuration_gdrive_helper(to_save)
reboot_required |= _config_int(to_save, "config_port")
reboot_required |= _config_string(to_save, "config_keyfile")
if config.config_keyfile and not os.path.isfile(config.config_keyfile):
return _configuration_result(
_("Keyfile Location is not Valid, Please Enter Correct Path"), gdriveError
)
reboot_required |= _config_string(to_save, "config_certfile")
if config.config_certfile and not os.path.isfile(config.config_certfile):
return _configuration_result(
_("Certfile Location is not Valid, Please Enter Correct Path"), gdriveError
)
_config_checkbox_int(to_save, "config_uploading")
_config_checkbox_int(to_save, "config_anonbrowse")
_config_checkbox_int(to_save, "config_public_reg")
reboot_required |= _config_checkbox_int(to_save, "config_kobo_sync")
_config_checkbox_int(to_save, "config_kobo_proxy")
_config_string(to_save, "config_upload_formats")
constants.EXTENSIONS_UPLOAD = [
x.lstrip().rstrip() for x in config.config_upload_formats.split(",")
]
_config_string(to_save, "config_calibre")
_config_string(to_save, "config_converterpath")
_config_string(to_save, "config_kepubifypath")
reboot_required |= _config_int(to_save, "config_login_type")
# LDAP configurator,
if config.config_login_type == constants.LOGIN_LDAP:
reboot_required |= _configuration_ldap_helper(to_save, gdriveError)
# Remote login configuration
_config_checkbox(to_save, "config_remote_login")
if not config.config_remote_login:
ub.session.query(ub.RemoteAuthToken).filter(
ub.RemoteAuthToken.token_type == 0
).delete()
# Goodreads configuration
_config_checkbox(to_save, "config_use_goodreads")
_config_string(to_save, "config_goodreads_api_key")
_config_string(to_save, "config_goodreads_api_secret")
if services.goodreads_support:
services.goodreads_support.connect(
config.config_goodreads_api_key,
config.config_goodreads_api_secret,
config.config_use_goodreads,
)
_config_int(to_save, "config_updatechannel")
# Reverse proxy login configuration
_config_checkbox(to_save, "config_allow_reverse_proxy_header_login")
_config_string(to_save, "config_reverse_proxy_login_header_name")
# OAuth configuration
if config.config_login_type == constants.LOGIN_OAUTH:
_configuration_oauth_helper(to_save)
reboot_required |= _configuration_logfile_helper(to_save, gdriveError)
# Rarfile Content configuration
_config_string(to_save, "config_rarfile_location")
unrar_status = helper.check_unrar(config.config_rarfile_location)
if unrar_status:
return _configuration_result(unrar_status, gdriveError)
try:
metadata_db = os.path.join(config.config_calibre_dir, "metadata.db")
if (
config.config_use_google_drive
and is_gdrive_ready()
and not os.path.exists(metadata_db)
):
gdriveutils.downloadFile(None, "metadata.db", metadata_db)
db_change = True
except Exception as e:
return _configuration_result("%s" % e, gdriveError)
if db_change:
if not db.setup_db(config):
return _configuration_result(
_("DB Location is not Valid, Please Enter Correct Path"), gdriveError
)
config.save()
flash(_("Calibre-Web configuration updated"), category="success")
if reboot_required:
web_server.stop(True)
return _configuration_result(None, gdriveError)
|
https://github.com/janeczku/calibre-web/issues/1356
|
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 2446, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1951, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1820, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1949, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1935, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/app/calibre-web/cps/web.py", line 165, in decorated_view
return login_required(func)(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/flask_login/utils.py", line 261, in decorated_view
return func(*args, **kwargs)
File "/app/calibre-web/cps/web.py", line 483, in index
entries, random, pagination = fill_indexpage(page, db.Books, True, [db.Books.timestamp.desc()])
File "/app/calibre-web/cps/helper.py", line 739, in fill_indexpage
len(db.session.query(database).filter(db_filter).filter(common_filters()).all()))
File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/query.py", line 3178, in all
return list(self)
File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/query.py", line 3334, in __iter__
return self._execute_and_instances(context)
File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/query.py", line 3356, in _execute_and_instances
querycontext, self._connection_from_session, close_with_result=True
File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/query.py", line 3371, in _get_bind_args
mapper=self._bind_mapper(), clause=querycontext.statement, **kw
File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/query.py", line 3349, in _connection_from_session
conn = self.session.connection(**kw)
File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/session.py", line 1124, in connection
execution_options=execution_options,
File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/session.py", line 1130, in _connection_for_bind
engine, execution_options
File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/session.py", line 408, in _connection_for_bind
self._assert_active()
File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/session.py", line 295, in _assert_active
code="7s2a",
InvalidRequestError: This Session's transaction has been rolled back due to a previous exception during flush. To begin a new transaction with this Session, first issue Session.rollback(). Original exception was: (sqlite3.OperationalError) attempt to write a readonly database
[SQL: INSERT INTO custom_column_8 (book, value) VALUES (?, ?)]
[parameters: (23130, 1)]
(Background on this error at: http://sqlalche.me/e/e3q8) (Background on this error at: http://sqlalche.me/e/7s2a)
Please report this issue with all related information: Create issue
|
InvalidRequestError
|
def migrate_Database(session):
engine = session.bind
if not engine.dialect.has_table(engine.connect(), "book_read_link"):
ReadBook.__table__.create(bind=engine)
if not engine.dialect.has_table(engine.connect(), "bookmark"):
Bookmark.__table__.create(bind=engine)
if not engine.dialect.has_table(engine.connect(), "kobo_reading_state"):
KoboReadingState.__table__.create(bind=engine)
if not engine.dialect.has_table(engine.connect(), "kobo_bookmark"):
KoboBookmark.__table__.create(bind=engine)
if not engine.dialect.has_table(engine.connect(), "kobo_statistics"):
KoboStatistics.__table__.create(bind=engine)
if not engine.dialect.has_table(engine.connect(), "archived_book"):
ArchivedBook.__table__.create(bind=engine)
if not engine.dialect.has_table(engine.connect(), "registration"):
ReadBook.__table__.create(bind=engine)
conn = engine.connect()
conn.execute("insert into registration (domain, allow) values('%.%',1)")
session.commit()
try:
session.query(exists().where(Registration.allow)).scalar()
session.commit()
except exc.OperationalError: # Database is not compatible, some columns are missing
conn = engine.connect()
conn.execute("ALTER TABLE registration ADD column 'allow' INTEGER")
conn.execute("update registration set 'allow' = 1")
session.commit()
try:
session.query(exists().where(RemoteAuthToken.token_type)).scalar()
session.commit()
except exc.OperationalError: # Database is not compatible, some columns are missing
conn = engine.connect()
conn.execute(
"ALTER TABLE remote_auth_token ADD column 'token_type' INTEGER DEFAULT 0"
)
conn.execute("update remote_auth_token set 'token_type' = 0")
session.commit()
try:
session.query(exists().where(ReadBook.read_status)).scalar()
except exc.OperationalError:
conn = engine.connect()
conn.execute(
"ALTER TABLE book_read_link ADD column 'read_status' INTEGER DEFAULT 0"
)
conn.execute("UPDATE book_read_link SET 'read_status' = 1 WHERE is_read")
conn.execute("ALTER TABLE book_read_link ADD column 'last_modified' DATETIME")
conn.execute(
"ALTER TABLE book_read_link ADD column 'last_time_started_reading' DATETIME"
)
conn.execute(
"ALTER TABLE book_read_link ADD column 'times_started_reading' INTEGER DEFAULT 0"
)
session.commit()
test = session.query(ReadBook).filter(ReadBook.last_modified == None).all()
for book in test:
book.last_modified = datetime.datetime.utcnow()
session.commit()
try:
session.query(exists().where(Shelf.uuid)).scalar()
except exc.OperationalError:
conn = engine.connect()
conn.execute("ALTER TABLE shelf ADD column 'uuid' STRING")
conn.execute("ALTER TABLE shelf ADD column 'created' DATETIME")
conn.execute("ALTER TABLE shelf ADD column 'last_modified' DATETIME")
conn.execute("ALTER TABLE book_shelf_link ADD column 'date_added' DATETIME")
for shelf in session.query(Shelf).all():
shelf.uuid = str(uuid.uuid4())
shelf.created = datetime.datetime.now()
shelf.last_modified = datetime.datetime.now()
for book_shelf in session.query(BookShelf).all():
book_shelf.date_added = datetime.datetime.now()
session.commit()
# Handle table exists, but no content
cnt = session.query(Registration).count()
if not cnt:
conn = engine.connect()
conn.execute("insert into registration (domain, allow) values('%.%',1)")
session.commit()
try:
session.query(exists().where(BookShelf.order)).scalar()
except exc.OperationalError: # Database is not compatible, some columns are missing
conn = engine.connect()
conn.execute("ALTER TABLE book_shelf_link ADD column 'order' INTEGER DEFAULT 1")
session.commit()
try:
create = False
session.query(exists().where(User.sidebar_view)).scalar()
except exc.OperationalError: # Database is not compatible, some columns are missing
conn = engine.connect()
conn.execute("ALTER TABLE user ADD column `sidebar_view` Integer DEFAULT 1")
session.commit()
create = True
try:
if create:
conn = engine.connect()
conn.execute("SELECT language_books FROM user")
session.commit()
except exc.OperationalError:
conn = engine.connect()
conn.execute(
"UPDATE user SET 'sidebar_view' = (random_books* :side_random + language_books * :side_lang "
"+ series_books * :side_series + category_books * :side_category + hot_books * "
":side_hot + :side_autor + :detail_random)",
{
"side_random": constants.SIDEBAR_RANDOM,
"side_lang": constants.SIDEBAR_LANGUAGE,
"side_series": constants.SIDEBAR_SERIES,
"side_category": constants.SIDEBAR_CATEGORY,
"side_hot": constants.SIDEBAR_HOT,
"side_autor": constants.SIDEBAR_AUTHOR,
"detail_random": constants.DETAIL_RANDOM,
},
)
session.commit()
try:
session.query(exists().where(User.denied_tags)).scalar()
except exc.OperationalError: # Database is not compatible, some columns are missing
conn = engine.connect()
conn.execute("ALTER TABLE user ADD column `denied_tags` String DEFAULT ''")
conn.execute("ALTER TABLE user ADD column `allowed_tags` String DEFAULT ''")
conn.execute("ALTER TABLE user ADD column `denied_column_value` DEFAULT ''")
conn.execute("ALTER TABLE user ADD column `allowed_column_value` DEFAULT ''")
session.commit()
if (
session.query(User)
.filter(User.role.op("&")(constants.ROLE_ANONYMOUS) == constants.ROLE_ANONYMOUS)
.first()
is None
):
create_anonymous_user(session)
try:
# check if one table with autoincrement is existing (should be user table)
conn = engine.connect()
conn.execute("SELECT COUNT(*) FROM sqlite_sequence WHERE name='user'")
except exc.OperationalError:
# Create new table user_id and copy contents of table user into it
conn = engine.connect()
conn.execute(
"CREATE TABLE user_id (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,"
" nickname VARCHAR(64),"
"email VARCHAR(120),"
"role SMALLINT,"
"password VARCHAR,"
"kindle_mail VARCHAR(120),"
"locale VARCHAR(2),"
"sidebar_view INTEGER,"
"default_language VARCHAR(3),"
"UNIQUE (nickname),"
"UNIQUE (email))"
)
conn.execute(
"INSERT INTO user_id(id, nickname, email, role, password, kindle_mail,locale,"
"sidebar_view, default_language) "
"SELECT id, nickname, email, role, password, kindle_mail, locale,"
"sidebar_view, default_language FROM user"
)
# delete old user table and rename new user_id table to user:
conn.execute("DROP TABLE user")
conn.execute("ALTER TABLE user_id RENAME TO user")
session.commit()
# Remove login capability of user Guest
conn = engine.connect()
conn.execute(
"UPDATE user SET password='' where nickname = 'Guest' and password !=''"
)
session.commit()
|
def migrate_Database(session):
engine = session.bind
if not engine.dialect.has_table(engine.connect(), "book_read_link"):
ReadBook.__table__.create(bind=engine)
if not engine.dialect.has_table(engine.connect(), "bookmark"):
Bookmark.__table__.create(bind=engine)
if not engine.dialect.has_table(engine.connect(), "kobo_reading_state"):
KoboReadingState.__table__.create(bind=engine)
if not engine.dialect.has_table(engine.connect(), "kobo_bookmark"):
KoboBookmark.__table__.create(bind=engine)
if not engine.dialect.has_table(engine.connect(), "kobo_statistics"):
KoboStatistics.__table__.create(bind=engine)
if not engine.dialect.has_table(engine.connect(), "archived_book"):
ArchivedBook.__table__.create(bind=engine)
if not engine.dialect.has_table(engine.connect(), "registration"):
ReadBook.__table__.create(bind=engine)
conn = engine.connect()
conn.execute("insert into registration (domain, allow) values('%.%',1)")
session.commit()
try:
session.query(exists().where(Registration.allow)).scalar()
session.commit()
except exc.OperationalError: # Database is not compatible, some columns are missing
conn = engine.connect()
conn.execute("ALTER TABLE registration ADD column 'allow' INTEGER")
conn.execute("update registration set 'allow' = 1")
session.commit()
try:
session.query(exists().where(RemoteAuthToken.token_type)).scalar()
session.commit()
except exc.OperationalError: # Database is not compatible, some columns are missing
conn = engine.connect()
conn.execute(
"ALTER TABLE remote_auth_token ADD column 'token_type' INTEGER DEFAULT 0"
)
conn.execute("update remote_auth_token set 'token_type' = 0")
session.commit()
try:
session.query(exists().where(ReadBook.read_status)).scalar()
except exc.OperationalError:
conn = engine.connect()
conn.execute(
"ALTER TABLE book_read_link ADD column 'read_status' INTEGER DEFAULT 0"
)
conn.execute("UPDATE book_read_link SET 'read_status' = 1 WHERE is_read")
conn.execute("ALTER TABLE book_read_link ADD column 'last_modified' DATETIME")
conn.execute(
"ALTER TABLE book_read_link ADD column 'last_time_started_reading' DATETIME"
)
conn.execute(
"ALTER TABLE book_read_link ADD column 'times_started_reading' INTEGER DEFAULT 0"
)
session.commit()
try:
session.query(exists().where(Shelf.uuid)).scalar()
except exc.OperationalError:
conn = engine.connect()
conn.execute("ALTER TABLE shelf ADD column 'uuid' STRING")
conn.execute("ALTER TABLE shelf ADD column 'created' DATETIME")
conn.execute("ALTER TABLE shelf ADD column 'last_modified' DATETIME")
conn.execute("ALTER TABLE book_shelf_link ADD column 'date_added' DATETIME")
for shelf in session.query(Shelf).all():
shelf.uuid = str(uuid.uuid4())
shelf.created = datetime.datetime.now()
shelf.last_modified = datetime.datetime.now()
for book_shelf in session.query(BookShelf).all():
book_shelf.date_added = datetime.datetime.now()
session.commit()
# Handle table exists, but no content
cnt = session.query(Registration).count()
if not cnt:
conn = engine.connect()
conn.execute("insert into registration (domain, allow) values('%.%',1)")
session.commit()
try:
session.query(exists().where(BookShelf.order)).scalar()
except exc.OperationalError: # Database is not compatible, some columns are missing
conn = engine.connect()
conn.execute("ALTER TABLE book_shelf_link ADD column 'order' INTEGER DEFAULT 1")
session.commit()
try:
create = False
session.query(exists().where(User.sidebar_view)).scalar()
except exc.OperationalError: # Database is not compatible, some columns are missing
conn = engine.connect()
conn.execute("ALTER TABLE user ADD column `sidebar_view` Integer DEFAULT 1")
session.commit()
create = True
try:
if create:
conn = engine.connect()
conn.execute("SELECT language_books FROM user")
session.commit()
except exc.OperationalError:
conn = engine.connect()
conn.execute(
"UPDATE user SET 'sidebar_view' = (random_books* :side_random + language_books * :side_lang "
"+ series_books * :side_series + category_books * :side_category + hot_books * "
":side_hot + :side_autor + :detail_random)",
{
"side_random": constants.SIDEBAR_RANDOM,
"side_lang": constants.SIDEBAR_LANGUAGE,
"side_series": constants.SIDEBAR_SERIES,
"side_category": constants.SIDEBAR_CATEGORY,
"side_hot": constants.SIDEBAR_HOT,
"side_autor": constants.SIDEBAR_AUTHOR,
"detail_random": constants.DETAIL_RANDOM,
},
)
session.commit()
try:
session.query(exists().where(User.denied_tags)).scalar()
except exc.OperationalError: # Database is not compatible, some columns are missing
conn = engine.connect()
conn.execute("ALTER TABLE user ADD column `denied_tags` String DEFAULT ''")
conn.execute("ALTER TABLE user ADD column `allowed_tags` String DEFAULT ''")
conn.execute("ALTER TABLE user ADD column `denied_column_value` DEFAULT ''")
conn.execute("ALTER TABLE user ADD column `allowed_column_value` DEFAULT ''")
if (
session.query(User)
.filter(User.role.op("&")(constants.ROLE_ANONYMOUS) == constants.ROLE_ANONYMOUS)
.first()
is None
):
create_anonymous_user(session)
try:
# check if one table with autoincrement is existing (should be user table)
conn = engine.connect()
conn.execute("SELECT COUNT(*) FROM sqlite_sequence WHERE name='user'")
except exc.OperationalError:
# Create new table user_id and copy contents of table user into it
conn = engine.connect()
conn.execute(
"CREATE TABLE user_id (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,"
" nickname VARCHAR(64),"
"email VARCHAR(120),"
"role SMALLINT,"
"password VARCHAR,"
"kindle_mail VARCHAR(120),"
"locale VARCHAR(2),"
"sidebar_view INTEGER,"
"default_language VARCHAR(3),"
"UNIQUE (nickname),"
"UNIQUE (email))"
)
conn.execute(
"INSERT INTO user_id(id, nickname, email, role, password, kindle_mail,locale,"
"sidebar_view, default_language) "
"SELECT id, nickname, email, role, password, kindle_mail, locale,"
"sidebar_view, default_language FROM user"
)
# delete old user table and rename new user_id table to user:
conn.execute("DROP TABLE user")
conn.execute("ALTER TABLE user_id RENAME TO user")
session.commit()
# Remove login capability of user Guest
conn = engine.connect()
conn.execute(
"UPDATE user SET password='' where nickname = 'Guest' and password !=''"
)
session.commit()
|
https://github.com/janeczku/calibre-web/issues/1391
|
[2020-05-13 09:25:49,687] INFO {cps.kobo:132} Kobo library sync request received.
[2020-05-13 09:25:49,687] DEBUG {cps.kobo:134} Kobo: Received unproxied request, changed request port to server port
[2020-05-13 09:25:49,737] ERROR {cps:1892} Exception on /kobo/token*******/v1/library/sync [GET]
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/calibre-web/app/cps/kobo_auth.py", line 112, in inner
return f(*args, **kwargs)
File "/calibre-web/app/cps/web.py", line 237, in inner
return f(*args, **kwargs)
File "/calibre-web/app/cps/kobo.py", line 190, in HandleSyncRequest
entitlement["ReadingState"] = get_kobo_reading_state_response(book, kobo_reading_state)
File "/calibre-web/app/cps/kobo.py", line 729, in get_kobo_reading_state_response
"StatusInfo": get_status_info_response(kobo_reading_state.book_read_link),
File "/calibre-web/app/cps/kobo.py", line 737, in get_status_info_response
"LastModified": convert_to_kobo_timestamp_string(book_read.last_modified),
File "/calibre-web/app/cps/kobo.py", line 124, in convert_to_kobo_timestamp_string
return timestamp.strftime("%Y-%m-%dT%H:%M:%SZ")
AttributeError: 'NoneType' object has no attribute 'strftime'
|
AttributeError
|
def get_metadata(book):
download_urls = []
for book_data in book.data:
if book_data.format not in KOBO_FORMATS:
continue
for kobo_format in KOBO_FORMATS[book_data.format]:
# log.debug('Id: %s, Format: %s' % (book.id, kobo_format))
download_urls.append(
{
"Format": kobo_format,
"Size": book_data.uncompressed_size,
"Url": get_download_url_for_book(book, book_data.format),
# The Kobo forma accepts platforms: (Generic, Android)
"Platform": "Generic",
# "DrmType": "None", # Not required
}
)
book_uuid = book.uuid
metadata = {
"Categories": [
"00000000-0000-0000-0000-000000000001",
],
"Contributors": get_author(book),
"CoverImageId": book_uuid,
"CrossRevisionId": book_uuid,
"CurrentDisplayPrice": {"CurrencyCode": "USD", "TotalAmount": 0},
"CurrentLoveDisplayPrice": {"TotalAmount": 0},
"Description": get_description(book),
"DownloadUrls": download_urls,
"EntitlementId": book_uuid,
"ExternalIds": [],
"Genre": "00000000-0000-0000-0000-000000000001",
"IsEligibleForKoboLove": False,
"IsInternetArchive": False,
"IsPreOrder": False,
"IsSocialEnabled": True,
"Language": "en",
"PhoneticPronunciations": {},
# TODO: Fix book.pubdate to return a datetime object so that we can easily
# convert it to the format Kobo devices expect.
"PublicationDate": book.pubdate,
"Publisher": {
"Imprint": "",
"Name": get_publisher(book),
},
"RevisionId": book_uuid,
"Title": book.title,
"WorkId": book_uuid,
}
if get_series(book):
if sys.version_info < (3, 0):
name = get_series(book).encode("utf-8")
else:
name = get_series(book)
metadata["Series"] = {
"Name": get_series(book),
"Number": get_seriesindex(book), # ToDo Check int() ?
"NumberFloat": float(get_seriesindex(book)),
# Get a deterministic id based on the series name.
"Id": uuid.uuid3(uuid.NAMESPACE_DNS, name),
}
return metadata
|
def get_metadata(book):
download_urls = []
for book_data in book.data:
if book_data.format not in KOBO_FORMATS:
continue
for kobo_format in KOBO_FORMATS[book_data.format]:
# log.debug('Id: %s, Format: %s' % (book.id, kobo_format))
download_urls.append(
{
"Format": kobo_format,
"Size": book_data.uncompressed_size,
"Url": get_download_url_for_book(book, book_data.format),
# The Kobo forma accepts platforms: (Generic, Android)
"Platform": "Generic",
# "DrmType": "None", # Not required
}
)
book_uuid = book.uuid
metadata = {
"Categories": [
"00000000-0000-0000-0000-000000000001",
],
"Contributors": get_author(book),
"CoverImageId": book_uuid,
"CrossRevisionId": book_uuid,
"CurrentDisplayPrice": {"CurrencyCode": "USD", "TotalAmount": 0},
"CurrentLoveDisplayPrice": {"TotalAmount": 0},
"Description": get_description(book),
"DownloadUrls": download_urls,
"EntitlementId": book_uuid,
"ExternalIds": [],
"Genre": "00000000-0000-0000-0000-000000000001",
"IsEligibleForKoboLove": False,
"IsInternetArchive": False,
"IsPreOrder": False,
"IsSocialEnabled": True,
"Language": "en",
"PhoneticPronunciations": {},
# TODO: Fix book.pubdate to return a datetime object so that we can easily
# convert it to the format Kobo devices expect.
"PublicationDate": book.pubdate,
"Publisher": {
"Imprint": "",
"Name": get_publisher(book),
},
"RevisionId": book_uuid,
"Title": book.title,
"WorkId": book_uuid,
}
if get_series(book):
if sys.version_info < (3, 0):
name = get_series(book).encode("utf-8")
else:
name = get_series(book)
metadata["Series"] = {
"Name": get_series(book),
"Number": book.series_index, # ToDo Check int() ?
"NumberFloat": float(book.series_index),
# Get a deterministic id based on the series name.
"Id": uuid.uuid3(uuid.NAMESPACE_DNS, name),
}
return metadata
|
https://github.com/janeczku/calibre-web/issues/1391
|
[2020-05-13 09:25:49,687] INFO {cps.kobo:132} Kobo library sync request received.
[2020-05-13 09:25:49,687] DEBUG {cps.kobo:134} Kobo: Received unproxied request, changed request port to server port
[2020-05-13 09:25:49,737] ERROR {cps:1892} Exception on /kobo/token*******/v1/library/sync [GET]
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/calibre-web/app/cps/kobo_auth.py", line 112, in inner
return f(*args, **kwargs)
File "/calibre-web/app/cps/web.py", line 237, in inner
return f(*args, **kwargs)
File "/calibre-web/app/cps/kobo.py", line 190, in HandleSyncRequest
entitlement["ReadingState"] = get_kobo_reading_state_response(book, kobo_reading_state)
File "/calibre-web/app/cps/kobo.py", line 729, in get_kobo_reading_state_response
"StatusInfo": get_status_info_response(kobo_reading_state.book_read_link),
File "/calibre-web/app/cps/kobo.py", line 737, in get_status_info_response
"LastModified": convert_to_kobo_timestamp_string(book_read.last_modified),
File "/calibre-web/app/cps/kobo.py", line 124, in convert_to_kobo_timestamp_string
return timestamp.strftime("%Y-%m-%dT%H:%M:%SZ")
AttributeError: 'NoneType' object has no attribute 'strftime'
|
AttributeError
|
def run(self):
main_thread = _get_main_thread()
while main_thread.is_alive():
doLock = threading.Lock()
doLock.acquire()
if self.current != self.last:
index = self.current
doLock.release()
if self.queue[index]["taskType"] == TASK_EMAIL:
self._send_raw_email()
if self.queue[index]["taskType"] == TASK_CONVERT:
self._convert_any_format()
if self.queue[index]["taskType"] == TASK_CONVERT_ANY:
self._convert_any_format()
# TASK_UPLOAD is handled implicitly
doLock.acquire()
self.current += 1
doLock.release()
else:
doLock.release()
if main_thread.is_alive():
time.sleep(1)
|
def run(self):
main_thread = _get_main_thread()
while main_thread.is_alive():
doLock = threading.Lock()
doLock.acquire()
if self.current != self.last:
doLock.release()
if self.queue[self.current]["taskType"] == TASK_EMAIL:
self._send_raw_email()
if self.queue[self.current]["taskType"] == TASK_CONVERT:
self._convert_any_format()
if self.queue[self.current]["taskType"] == TASK_CONVERT_ANY:
self._convert_any_format()
# TASK_UPLOAD is handled implicitly
self.current += 1
else:
doLock.release()
if main_thread.is_alive():
time.sleep(1)
|
https://github.com/janeczku/calibre-web/issues/890
|
Traceback (most recent call last):
File "calibre-web/vendor/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "calibre-web/vendor/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "calibre-web/vendor/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "calibre-web/cps/web.py", line 348, in decorated_view
return func(*args, **kwargs)
File "calibre-web/cps/web.py", line 490, in inner
return f(*args, **kwargs)
File "calibre-web/cps/web.py", line 3868, in upload
"<a href=\"" + url_for('show_book', book_id=book.id) + "\">" + uploadText + "</a>")
File "calibre-web/cps/worker.py", line 406, in add_upload
self.UIqueue[self.current]['formStarttime'] = self.queue[self.current]['starttime']
IndexError: list index out of range
|
IndexError
|
def get_taskstatus(self):
doLock = threading.Lock()
doLock.acquire()
if self.current < len(self.queue):
if self.UIqueue[self.current]["stat"] == STAT_STARTED:
if self.queue[self.current]["taskType"] == TASK_EMAIL:
self.UIqueue[self.current]["progress"] = self.get_send_status()
self.UIqueue[self.current]["formRuntime"] = (
datetime.now() - self.queue[self.current]["starttime"]
)
self.UIqueue[self.current]["rt"] = (
self.UIqueue[self.current]["formRuntime"].days * 24 * 60
+ self.UIqueue[self.current]["formRuntime"].seconds
+ self.UIqueue[self.current]["formRuntime"].microseconds
)
doLock.release()
return self.UIqueue
|
def get_taskstatus(self):
if self.current < len(self.queue):
if self.UIqueue[self.current]["stat"] == STAT_STARTED:
if self.queue[self.current]["taskType"] == TASK_EMAIL:
self.UIqueue[self.current]["progress"] = self.get_send_status()
self.UIqueue[self.current]["formRuntime"] = (
datetime.now() - self.queue[self.current]["starttime"]
)
self.UIqueue[self.current]["rt"] = (
self.UIqueue[self.current]["formRuntime"].days * 24 * 60
+ self.UIqueue[self.current]["formRuntime"].seconds
+ self.UIqueue[self.current]["formRuntime"].microseconds
)
return self.UIqueue
|
https://github.com/janeczku/calibre-web/issues/890
|
Traceback (most recent call last):
File "calibre-web/vendor/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "calibre-web/vendor/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "calibre-web/vendor/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "calibre-web/cps/web.py", line 348, in decorated_view
return func(*args, **kwargs)
File "calibre-web/cps/web.py", line 490, in inner
return f(*args, **kwargs)
File "calibre-web/cps/web.py", line 3868, in upload
"<a href=\"" + url_for('show_book', book_id=book.id) + "\">" + uploadText + "</a>")
File "calibre-web/cps/worker.py", line 406, in add_upload
self.UIqueue[self.current]['formStarttime'] = self.queue[self.current]['starttime']
IndexError: list index out of range
|
IndexError
|
def _convert_any_format(self):
# convert book, and upload in case of google drive
doLock = threading.Lock()
doLock.acquire()
index = self.current
doLock.release()
self.UIqueue[index]["stat"] = STAT_STARTED
self.queue[index]["starttime"] = datetime.now()
self.UIqueue[index]["formStarttime"] = self.queue[self.current]["starttime"]
curr_task = self.queue[index]["taskType"]
filename = self._convert_ebook_format()
if filename:
if config.config_use_google_drive:
gdriveutils.updateGdriveCalibreFromLocal()
if curr_task == TASK_CONVERT:
self.add_email(
self.queue[index]["settings"]["subject"],
self.queue[index]["path"],
filename,
self.queue[index]["settings"],
self.queue[index]["kindle"],
self.UIqueue[index]["user"],
self.queue[index]["title"],
self.queue[index]["settings"]["body"],
)
|
def _convert_any_format(self):
# convert book, and upload in case of google drive
self.UIqueue[self.current]["stat"] = STAT_STARTED
self.queue[self.current]["starttime"] = datetime.now()
self.UIqueue[self.current]["formStarttime"] = self.queue[self.current]["starttime"]
curr_task = self.queue[self.current]["taskType"]
filename = self._convert_ebook_format()
if filename:
if config.config_use_google_drive:
gdriveutils.updateGdriveCalibreFromLocal()
if curr_task == TASK_CONVERT:
self.add_email(
self.queue[self.current]["settings"]["subject"],
self.queue[self.current]["path"],
filename,
self.queue[self.current]["settings"],
self.queue[self.current]["kindle"],
self.UIqueue[self.current]["user"],
self.queue[self.current]["title"],
self.queue[self.current]["settings"]["body"],
)
|
https://github.com/janeczku/calibre-web/issues/890
|
Traceback (most recent call last):
File "calibre-web/vendor/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "calibre-web/vendor/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "calibre-web/vendor/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "calibre-web/cps/web.py", line 348, in decorated_view
return func(*args, **kwargs)
File "calibre-web/cps/web.py", line 490, in inner
return f(*args, **kwargs)
File "calibre-web/cps/web.py", line 3868, in upload
"<a href=\"" + url_for('show_book', book_id=book.id) + "\">" + uploadText + "</a>")
File "calibre-web/cps/worker.py", line 406, in add_upload
self.UIqueue[self.current]['formStarttime'] = self.queue[self.current]['starttime']
IndexError: list index out of range
|
IndexError
|
def _convert_ebook_format(self):
error_message = None
doLock = threading.Lock()
doLock.acquire()
index = self.current
doLock.release()
file_path = self.queue[index]["file_path"]
bookid = self.queue[index]["bookid"]
format_old_ext = "." + self.queue[index]["settings"]["old_book_format"].lower()
format_new_ext = "." + self.queue[index]["settings"]["new_book_format"].lower()
# check to see if destination format already exists -
# if it does - mark the conversion task as complete and return a success
# this will allow send to kindle workflow to continue to work
if os.path.isfile(file_path + format_new_ext):
log.info("Book id %d already converted to %s", bookid, format_new_ext)
cur_book = db.session.query(db.Books).filter(db.Books.id == bookid).first()
self.queue[index]["path"] = file_path
self.queue[index]["title"] = cur_book.title
self._handleSuccess()
return file_path + format_new_ext
else:
log.info(
"Book id %d - target format of %s does not exist. Moving forward with convert.",
bookid,
format_new_ext,
)
# check if converter-executable is existing
if not os.path.exists(config.config_converterpath):
# ToDo Text is not translated
self._handleError("Convertertool %s not found" % config.config_converterpath)
return
try:
# check which converter to use kindlegen is "1"
if format_old_ext == ".epub" and format_new_ext == ".mobi":
if config.config_ebookconverter == 1:
"""if os.name == 'nt':
command = config.config_converterpath + u' "' + file_path + u'.epub"'
if sys.version_info < (3, 0):
command = command.encode(sys.getfilesystemencoding())
else:"""
command = [config.config_converterpath, file_path + ".epub"]
quotes = [1]
if config.config_ebookconverter == 2:
# Linux py2.7 encode as list without quotes no empty element for parameters
# linux py3.x no encode and as list without quotes no empty element for parameters
# windows py2.7 encode as string with quotes empty element for parameters is okay
# windows py 3.x no encode and as string with quotes empty element for parameters is okay
# separate handling for windows and linux
quotes = [1, 2]
"""if os.name == 'nt':
command = config.config_converterpath + u' "' + file_path + format_old_ext + u'" "' + \
file_path + format_new_ext + u'" ' + config.config_calibre
if sys.version_info < (3, 0):
command = command.encode(sys.getfilesystemencoding())
else:"""
command = [
config.config_converterpath,
(file_path + format_old_ext),
(file_path + format_new_ext),
]
quotes_index = 3
if config.config_calibre:
parameters = config.config_calibre.split(" ")
for param in parameters:
command.append(param)
quotes.append(quotes_index)
quotes_index += 1
p = process_open(command, quotes)
# p = subprocess.Popen(command, stdout=subprocess.PIPE, universal_newlines=True)
except OSError as e:
self._handleError(_("Ebook-converter failed: %(error)s", error=e))
return
if config.config_ebookconverter == 1:
nextline = p.communicate()[0]
# Format of error message (kindlegen translates its output texts):
# Error(prcgen):E23006: Language not recognized in metadata.The dc:Language field is mandatory.Aborting.
conv_error = re.search(r".*\(.*\):(E\d+):\s(.*)", nextline, re.MULTILINE)
# If error occoures, store error message for logfile
if conv_error:
error_message = _(
"Kindlegen failed with Error %(error)s. Message: %(message)s",
error=conv_error.group(1),
message=conv_error.group(2).strip(),
)
log.debug("convert_kindlegen: %s", nextline)
else:
while p.poll() is None:
nextline = p.stdout.readline()
if os.name == "nt" and sys.version_info < (3, 0):
nextline = nextline.decode("windows-1252")
elif os.name == "posix" and sys.version_info < (3, 0):
nextline = nextline.decode("utf-8")
log.debug(nextline.strip("\r\n"))
# parse progress string from calibre-converter
progress = re.search(r"(\d+)%\s.*", nextline)
if progress:
self.UIqueue[index]["progress"] = progress.group(1) + " %"
# process returncode
check = p.returncode
calibre_traceback = p.stderr.readlines()
for ele in calibre_traceback:
if sys.version_info < (3, 0):
ele = ele.decode("utf-8")
log.debug(ele.strip("\n"))
if not ele.startswith("Traceback") and not ele.startswith(" File"):
error_message = "Calibre failed with error: %s" % ele.strip("\n")
# kindlegen returncodes
# 0 = Info(prcgen):I1036: Mobi file built successfully
# 1 = Info(prcgen):I1037: Mobi file built with WARNINGS!
# 2 = Info(prcgen):I1038: MOBI file could not be generated because of errors!
if (check < 2 and config.config_ebookconverter == 1) or (
check == 0 and config.config_ebookconverter == 2
):
cur_book = db.session.query(db.Books).filter(db.Books.id == bookid).first()
if os.path.isfile(file_path + format_new_ext):
new_format = db.Data(
name=cur_book.data[0].name,
book_format=self.queue[index]["settings"]["new_book_format"].upper(),
book=bookid,
uncompressed_size=os.path.getsize(file_path + format_new_ext),
)
cur_book.data.append(new_format)
db.session.commit()
self.queue[index]["path"] = cur_book.path
self.queue[index]["title"] = cur_book.title
if config.config_use_google_drive:
os.remove(file_path + format_old_ext)
self._handleSuccess()
return file_path + format_new_ext
else:
error_message = format_new_ext.upper() + " format not found on disk"
log.info("ebook converter failed with error while converting book")
if not error_message:
error_message = "Ebook converter failed with unknown error"
self._handleError(error_message)
return
|
def _convert_ebook_format(self):
error_message = None
file_path = self.queue[self.current]["file_path"]
bookid = self.queue[self.current]["bookid"]
format_old_ext = (
"." + self.queue[self.current]["settings"]["old_book_format"].lower()
)
format_new_ext = (
"." + self.queue[self.current]["settings"]["new_book_format"].lower()
)
# check to see if destination format already exists -
# if it does - mark the conversion task as complete and return a success
# this will allow send to kindle workflow to continue to work
if os.path.isfile(file_path + format_new_ext):
log.info("Book id %d already converted to %s", bookid, format_new_ext)
cur_book = db.session.query(db.Books).filter(db.Books.id == bookid).first()
self.queue[self.current]["path"] = file_path
self.queue[self.current]["title"] = cur_book.title
self._handleSuccess()
return file_path + format_new_ext
else:
log.info(
"Book id %d - target format of %s does not exist. Moving forward with convert.",
bookid,
format_new_ext,
)
# check if converter-executable is existing
if not os.path.exists(config.config_converterpath):
# ToDo Text is not translated
self._handleError("Convertertool %s not found" % config.config_converterpath)
return
try:
# check which converter to use kindlegen is "1"
if format_old_ext == ".epub" and format_new_ext == ".mobi":
if config.config_ebookconverter == 1:
"""if os.name == 'nt':
command = config.config_converterpath + u' "' + file_path + u'.epub"'
if sys.version_info < (3, 0):
command = command.encode(sys.getfilesystemencoding())
else:"""
command = [config.config_converterpath, file_path + ".epub"]
quotes = [1]
if config.config_ebookconverter == 2:
# Linux py2.7 encode as list without quotes no empty element for parameters
# linux py3.x no encode and as list without quotes no empty element for parameters
# windows py2.7 encode as string with quotes empty element for parameters is okay
# windows py 3.x no encode and as string with quotes empty element for parameters is okay
# separate handling for windows and linux
quotes = [1, 2]
"""if os.name == 'nt':
command = config.config_converterpath + u' "' + file_path + format_old_ext + u'" "' + \
file_path + format_new_ext + u'" ' + config.config_calibre
if sys.version_info < (3, 0):
command = command.encode(sys.getfilesystemencoding())
else:"""
command = [
config.config_converterpath,
(file_path + format_old_ext),
(file_path + format_new_ext),
]
index = 3
if config.config_calibre:
parameters = config.config_calibre.split(" ")
for param in parameters:
command.append(param)
quotes.append(index)
index += 1
p = process_open(command, quotes)
# p = subprocess.Popen(command, stdout=subprocess.PIPE, universal_newlines=True)
except OSError as e:
self._handleError(_("Ebook-converter failed: %(error)s", error=e))
return
if config.config_ebookconverter == 1:
nextline = p.communicate()[0]
# Format of error message (kindlegen translates its output texts):
# Error(prcgen):E23006: Language not recognized in metadata.The dc:Language field is mandatory.Aborting.
conv_error = re.search(r".*\(.*\):(E\d+):\s(.*)", nextline, re.MULTILINE)
# If error occoures, store error message for logfile
if conv_error:
error_message = _(
"Kindlegen failed with Error %(error)s. Message: %(message)s",
error=conv_error.group(1),
message=conv_error.group(2).strip(),
)
log.debug("convert_kindlegen: %s", nextline)
else:
while p.poll() is None:
nextline = p.stdout.readline()
if os.name == "nt" and sys.version_info < (3, 0):
nextline = nextline.decode("windows-1252")
elif os.name == "posix" and sys.version_info < (3, 0):
nextline = nextline.decode("utf-8")
log.debug(nextline.strip("\r\n"))
# parse progress string from calibre-converter
progress = re.search(r"(\d+)%\s.*", nextline)
if progress:
self.UIqueue[self.current]["progress"] = progress.group(1) + " %"
# process returncode
check = p.returncode
calibre_traceback = p.stderr.readlines()
for ele in calibre_traceback:
if sys.version_info < (3, 0):
ele = ele.decode("utf-8")
log.debug(ele.strip("\n"))
if not ele.startswith("Traceback") and not ele.startswith(" File"):
error_message = "Calibre failed with error: %s" % ele.strip("\n")
# kindlegen returncodes
# 0 = Info(prcgen):I1036: Mobi file built successfully
# 1 = Info(prcgen):I1037: Mobi file built with WARNINGS!
# 2 = Info(prcgen):I1038: MOBI file could not be generated because of errors!
if (check < 2 and config.config_ebookconverter == 1) or (
check == 0 and config.config_ebookconverter == 2
):
cur_book = db.session.query(db.Books).filter(db.Books.id == bookid).first()
if os.path.isfile(file_path + format_new_ext):
new_format = db.Data(
name=cur_book.data[0].name,
book_format=self.queue[self.current]["settings"][
"new_book_format"
].upper(),
book=bookid,
uncompressed_size=os.path.getsize(file_path + format_new_ext),
)
cur_book.data.append(new_format)
db.session.commit()
self.queue[self.current]["path"] = cur_book.path
self.queue[self.current]["title"] = cur_book.title
if config.config_use_google_drive:
os.remove(file_path + format_old_ext)
self._handleSuccess()
return file_path + format_new_ext
else:
error_message = format_new_ext.upper() + " format not found on disk"
log.info("ebook converter failed with error while converting book")
if not error_message:
error_message = "Ebook converter failed with unknown error"
self._handleError(error_message)
return
|
https://github.com/janeczku/calibre-web/issues/890
|
Traceback (most recent call last):
File "calibre-web/vendor/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "calibre-web/vendor/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "calibre-web/vendor/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "calibre-web/cps/web.py", line 348, in decorated_view
return func(*args, **kwargs)
File "calibre-web/cps/web.py", line 490, in inner
return f(*args, **kwargs)
File "calibre-web/cps/web.py", line 3868, in upload
"<a href=\"" + url_for('show_book', book_id=book.id) + "\">" + uploadText + "</a>")
File "calibre-web/cps/worker.py", line 406, in add_upload
self.UIqueue[self.current]['formStarttime'] = self.queue[self.current]['starttime']
IndexError: list index out of range
|
IndexError
|
def _send_raw_email(self):
doLock = threading.Lock()
doLock.acquire()
index = self.current
doLock.release()
self.queue[index]["starttime"] = datetime.now()
self.UIqueue[index]["formStarttime"] = self.queue[index]["starttime"]
self.UIqueue[index]["stat"] = STAT_STARTED
obj = self.queue[index]
# create MIME message
msg = MIMEMultipart()
msg["Subject"] = self.queue[index]["subject"]
msg["Message-Id"] = make_msgid("calibre-web")
msg["Date"] = formatdate(localtime=True)
text = self.queue[index]["text"]
msg.attach(MIMEText(text.encode("UTF-8"), "plain", "UTF-8"))
if obj["attachment"]:
result = get_attachment(obj["filepath"], obj["attachment"])
if result:
msg.attach(result)
else:
self._handleError("Attachment not found")
return
msg["From"] = obj["settings"]["mail_from"]
msg["To"] = obj["recipent"]
use_ssl = int(obj["settings"].get("mail_use_ssl", 0))
try:
# convert MIME message to string
fp = StringIO()
gen = Generator(fp, mangle_from_=False)
gen.flatten(msg)
msg = fp.getvalue()
# send email
timeout = 600 # set timeout to 5mins
# redirect output to logfile on python2 pn python3 debugoutput is caught with overwritten
# _print_debug function
if sys.version_info < (3, 0):
org_smtpstderr = smtplib.stderr
smtplib.stderr = logger.StderrLogger("worker.smtp")
if use_ssl == 2:
self.asyncSMTP = email_SSL(
obj["settings"]["mail_server"], obj["settings"]["mail_port"], timeout
)
else:
self.asyncSMTP = email(
obj["settings"]["mail_server"], obj["settings"]["mail_port"], timeout
)
# link to logginglevel
if logger.is_debug_enabled():
self.asyncSMTP.set_debuglevel(1)
if use_ssl == 1:
self.asyncSMTP.starttls()
if obj["settings"]["mail_password"]:
self.asyncSMTP.login(
str(obj["settings"]["mail_login"]),
str(obj["settings"]["mail_password"]),
)
self.asyncSMTP.sendmail(obj["settings"]["mail_from"], obj["recipent"], msg)
self.asyncSMTP.quit()
self._handleSuccess()
if sys.version_info < (3, 0):
smtplib.stderr = org_smtpstderr
except MemoryError as e:
self._handleError("Error sending email: " + e.message)
return None
except (smtplib.SMTPException, smtplib.SMTPAuthenticationError) as e:
if hasattr(e, "smtp_error"):
text = e.smtp_error.decode("utf-8").replace("\n", ". ")
elif hasattr(e, "message"):
text = e.message
else:
text = ""
self._handleError("Error sending email: " + text)
return None
except socket.error as e:
self._handleError("Error sending email: " + e.strerror)
return None
|
def _send_raw_email(self):
self.queue[self.current]["starttime"] = datetime.now()
self.UIqueue[self.current]["formStarttime"] = self.queue[self.current]["starttime"]
self.UIqueue[self.current]["stat"] = STAT_STARTED
obj = self.queue[self.current]
# create MIME message
msg = MIMEMultipart()
msg["Subject"] = self.queue[self.current]["subject"]
msg["Message-Id"] = make_msgid("calibre-web")
msg["Date"] = formatdate(localtime=True)
text = self.queue[self.current]["text"]
msg.attach(MIMEText(text.encode("UTF-8"), "plain", "UTF-8"))
if obj["attachment"]:
result = get_attachment(obj["filepath"], obj["attachment"])
if result:
msg.attach(result)
else:
self._handleError("Attachment not found")
return
msg["From"] = obj["settings"]["mail_from"]
msg["To"] = obj["recipent"]
use_ssl = int(obj["settings"].get("mail_use_ssl", 0))
try:
# convert MIME message to string
fp = StringIO()
gen = Generator(fp, mangle_from_=False)
gen.flatten(msg)
msg = fp.getvalue()
# send email
timeout = 600 # set timeout to 5mins
# redirect output to logfile on python2 pn python3 debugoutput is caught with overwritten
# _print_debug function
if sys.version_info < (3, 0):
org_smtpstderr = smtplib.stderr
smtplib.stderr = logger.StderrLogger("worker.smtp")
if use_ssl == 2:
self.asyncSMTP = email_SSL(
obj["settings"]["mail_server"], obj["settings"]["mail_port"], timeout
)
else:
self.asyncSMTP = email(
obj["settings"]["mail_server"], obj["settings"]["mail_port"], timeout
)
# link to logginglevel
if logger.is_debug_enabled():
self.asyncSMTP.set_debuglevel(1)
if use_ssl == 1:
self.asyncSMTP.starttls()
if obj["settings"]["mail_password"]:
self.asyncSMTP.login(
str(obj["settings"]["mail_login"]),
str(obj["settings"]["mail_password"]),
)
self.asyncSMTP.sendmail(obj["settings"]["mail_from"], obj["recipent"], msg)
self.asyncSMTP.quit()
self._handleSuccess()
if sys.version_info < (3, 0):
smtplib.stderr = org_smtpstderr
except MemoryError as e:
self._handleError("Error sending email: " + e.message)
return None
except (smtplib.SMTPException, smtplib.SMTPAuthenticationError) as e:
if hasattr(e, "smtp_error"):
text = e.smtp_error.decode("utf-8").replace("\n", ". ")
elif hasattr(e, "message"):
text = e.message
else:
text = ""
self._handleError("Error sending email: " + text)
return None
except socket.error as e:
self._handleError("Error sending email: " + e.strerror)
return None
|
https://github.com/janeczku/calibre-web/issues/890
|
Traceback (most recent call last):
File "calibre-web/vendor/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "calibre-web/vendor/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "calibre-web/vendor/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "calibre-web/cps/web.py", line 348, in decorated_view
return func(*args, **kwargs)
File "calibre-web/cps/web.py", line 490, in inner
return f(*args, **kwargs)
File "calibre-web/cps/web.py", line 3868, in upload
"<a href=\"" + url_for('show_book', book_id=book.id) + "\">" + uploadText + "</a>")
File "calibre-web/cps/worker.py", line 406, in add_upload
self.UIqueue[self.current]['formStarttime'] = self.queue[self.current]['starttime']
IndexError: list index out of range
|
IndexError
|
def _handleError(self, error_message):
log.error(error_message)
doLock = threading.Lock()
doLock.acquire()
index = self.current
doLock.release()
self.UIqueue[index]["stat"] = STAT_FAIL
self.UIqueue[index]["progress"] = "100 %"
self.UIqueue[index]["formRuntime"] = (
datetime.now() - self.queue[self.current]["starttime"]
)
self.UIqueue[index]["message"] = error_message
|
def _handleError(self, error_message):
log.error(error_message)
self.UIqueue[self.current]["stat"] = STAT_FAIL
self.UIqueue[self.current]["progress"] = "100 %"
self.UIqueue[self.current]["formRuntime"] = (
datetime.now() - self.queue[self.current]["starttime"]
)
self.UIqueue[self.current]["message"] = error_message
|
https://github.com/janeczku/calibre-web/issues/890
|
Traceback (most recent call last):
File "calibre-web/vendor/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "calibre-web/vendor/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "calibre-web/vendor/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "calibre-web/cps/web.py", line 348, in decorated_view
return func(*args, **kwargs)
File "calibre-web/cps/web.py", line 490, in inner
return f(*args, **kwargs)
File "calibre-web/cps/web.py", line 3868, in upload
"<a href=\"" + url_for('show_book', book_id=book.id) + "\">" + uploadText + "</a>")
File "calibre-web/cps/worker.py", line 406, in add_upload
self.UIqueue[self.current]['formStarttime'] = self.queue[self.current]['starttime']
IndexError: list index out of range
|
IndexError
|
def _handleSuccess(self):
doLock = threading.Lock()
doLock.acquire()
index = self.current
doLock.release()
self.UIqueue[index]["stat"] = STAT_FINISH_SUCCESS
self.UIqueue[index]["progress"] = "100 %"
self.UIqueue[index]["formRuntime"] = (
datetime.now() - self.queue[self.current]["starttime"]
)
|
def _handleSuccess(self):
self.UIqueue[self.current]["stat"] = STAT_FINISH_SUCCESS
self.UIqueue[self.current]["progress"] = "100 %"
self.UIqueue[self.current]["formRuntime"] = (
datetime.now() - self.queue[self.current]["starttime"]
)
|
https://github.com/janeczku/calibre-web/issues/890
|
Traceback (most recent call last):
File "calibre-web/vendor/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "calibre-web/vendor/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "calibre-web/vendor/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "calibre-web/cps/web.py", line 348, in decorated_view
return func(*args, **kwargs)
File "calibre-web/cps/web.py", line 490, in inner
return f(*args, **kwargs)
File "calibre-web/cps/web.py", line 3868, in upload
"<a href=\"" + url_for('show_book', book_id=book.id) + "\">" + uploadText + "</a>")
File "calibre-web/cps/worker.py", line 406, in add_upload
self.UIqueue[self.current]['formStarttime'] = self.queue[self.current]['starttime']
IndexError: list index out of range
|
IndexError
|
def __init__(self):
threading.Thread.__init__(self)
self.status = 0
self.current = 0
self.last = 0
self.queue = list()
self.UIqueue = list()
self.asyncSMTP = None
self.id = 0
self.doLock = threading.Lock()
|
def __init__(self):
threading.Thread.__init__(self)
self.status = 0
self.current = 0
self.last = 0
self.queue = list()
self.UIqueue = list()
self.asyncSMTP = None
self.id = 0
|
https://github.com/janeczku/calibre-web/issues/890
|
Traceback (most recent call last):
File "calibre-web/vendor/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "calibre-web/vendor/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "calibre-web/vendor/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "calibre-web/cps/web.py", line 348, in decorated_view
return func(*args, **kwargs)
File "calibre-web/cps/web.py", line 490, in inner
return f(*args, **kwargs)
File "calibre-web/cps/web.py", line 3868, in upload
"<a href=\"" + url_for('show_book', book_id=book.id) + "\">" + uploadText + "</a>")
File "calibre-web/cps/worker.py", line 406, in add_upload
self.UIqueue[self.current]['formStarttime'] = self.queue[self.current]['starttime']
IndexError: list index out of range
|
IndexError
|
def run(self):
main_thread = _get_main_thread()
while main_thread.is_alive():
self.doLock.acquire()
if self.current != self.last:
index = self.current
self.doLock.release()
if self.queue[index]["taskType"] == TASK_EMAIL:
self._send_raw_email()
if self.queue[index]["taskType"] == TASK_CONVERT:
self._convert_any_format()
if self.queue[index]["taskType"] == TASK_CONVERT_ANY:
self._convert_any_format()
# TASK_UPLOAD is handled implicitly
self.doLock.acquire()
self.current += 1
self.doLock.release()
else:
self.doLock.release()
if main_thread.is_alive():
time.sleep(1)
|
def run(self):
main_thread = _get_main_thread()
while main_thread.is_alive():
doLock = threading.Lock()
doLock.acquire()
if self.current != self.last:
index = self.current
doLock.release()
if self.queue[index]["taskType"] == TASK_EMAIL:
self._send_raw_email()
if self.queue[index]["taskType"] == TASK_CONVERT:
self._convert_any_format()
if self.queue[index]["taskType"] == TASK_CONVERT_ANY:
self._convert_any_format()
# TASK_UPLOAD is handled implicitly
doLock.acquire()
self.current += 1
doLock.release()
else:
doLock.release()
if main_thread.is_alive():
time.sleep(1)
|
https://github.com/janeczku/calibre-web/issues/890
|
Traceback (most recent call last):
File "calibre-web/vendor/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "calibre-web/vendor/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "calibre-web/vendor/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "calibre-web/cps/web.py", line 348, in decorated_view
return func(*args, **kwargs)
File "calibre-web/cps/web.py", line 490, in inner
return f(*args, **kwargs)
File "calibre-web/cps/web.py", line 3868, in upload
"<a href=\"" + url_for('show_book', book_id=book.id) + "\">" + uploadText + "</a>")
File "calibre-web/cps/worker.py", line 406, in add_upload
self.UIqueue[self.current]['formStarttime'] = self.queue[self.current]['starttime']
IndexError: list index out of range
|
IndexError
|
def get_taskstatus(self):
self.doLock.acquire()
if self.current < len(self.queue):
if self.UIqueue[self.current]["stat"] == STAT_STARTED:
if self.queue[self.current]["taskType"] == TASK_EMAIL:
self.UIqueue[self.current]["progress"] = self.get_send_status()
self.UIqueue[self.current]["formRuntime"] = (
datetime.now() - self.queue[self.current]["starttime"]
)
self.UIqueue[self.current]["rt"] = (
self.UIqueue[self.current]["formRuntime"].days * 24 * 60
+ self.UIqueue[self.current]["formRuntime"].seconds
+ self.UIqueue[self.current]["formRuntime"].microseconds
)
self.doLock.release()
return self.UIqueue
|
def get_taskstatus(self):
doLock = threading.Lock()
doLock.acquire()
if self.current < len(self.queue):
if self.UIqueue[self.current]["stat"] == STAT_STARTED:
if self.queue[self.current]["taskType"] == TASK_EMAIL:
self.UIqueue[self.current]["progress"] = self.get_send_status()
self.UIqueue[self.current]["formRuntime"] = (
datetime.now() - self.queue[self.current]["starttime"]
)
self.UIqueue[self.current]["rt"] = (
self.UIqueue[self.current]["formRuntime"].days * 24 * 60
+ self.UIqueue[self.current]["formRuntime"].seconds
+ self.UIqueue[self.current]["formRuntime"].microseconds
)
doLock.release()
return self.UIqueue
|
https://github.com/janeczku/calibre-web/issues/890
|
Traceback (most recent call last):
File "calibre-web/vendor/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "calibre-web/vendor/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "calibre-web/vendor/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "calibre-web/cps/web.py", line 348, in decorated_view
return func(*args, **kwargs)
File "calibre-web/cps/web.py", line 490, in inner
return f(*args, **kwargs)
File "calibre-web/cps/web.py", line 3868, in upload
"<a href=\"" + url_for('show_book', book_id=book.id) + "\">" + uploadText + "</a>")
File "calibre-web/cps/worker.py", line 406, in add_upload
self.UIqueue[self.current]['formStarttime'] = self.queue[self.current]['starttime']
IndexError: list index out of range
|
IndexError
|
def _convert_any_format(self):
# convert book, and upload in case of google drive
self.doLock.acquire()
index = self.current
self.doLock.release()
self.UIqueue[index]["stat"] = STAT_STARTED
self.queue[index]["starttime"] = datetime.now()
self.UIqueue[index]["formStarttime"] = self.queue[self.current]["starttime"]
curr_task = self.queue[index]["taskType"]
filename = self._convert_ebook_format()
if filename:
if config.config_use_google_drive:
gdriveutils.updateGdriveCalibreFromLocal()
if curr_task == TASK_CONVERT:
self.add_email(
self.queue[index]["settings"]["subject"],
self.queue[index]["path"],
filename,
self.queue[index]["settings"],
self.queue[index]["kindle"],
self.UIqueue[index]["user"],
self.queue[index]["title"],
self.queue[index]["settings"]["body"],
)
|
def _convert_any_format(self):
# convert book, and upload in case of google drive
doLock = threading.Lock()
doLock.acquire()
index = self.current
doLock.release()
self.UIqueue[index]["stat"] = STAT_STARTED
self.queue[index]["starttime"] = datetime.now()
self.UIqueue[index]["formStarttime"] = self.queue[self.current]["starttime"]
curr_task = self.queue[index]["taskType"]
filename = self._convert_ebook_format()
if filename:
if config.config_use_google_drive:
gdriveutils.updateGdriveCalibreFromLocal()
if curr_task == TASK_CONVERT:
self.add_email(
self.queue[index]["settings"]["subject"],
self.queue[index]["path"],
filename,
self.queue[index]["settings"],
self.queue[index]["kindle"],
self.UIqueue[index]["user"],
self.queue[index]["title"],
self.queue[index]["settings"]["body"],
)
|
https://github.com/janeczku/calibre-web/issues/890
|
Traceback (most recent call last):
File "calibre-web/vendor/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "calibre-web/vendor/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "calibre-web/vendor/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "calibre-web/cps/web.py", line 348, in decorated_view
return func(*args, **kwargs)
File "calibre-web/cps/web.py", line 490, in inner
return f(*args, **kwargs)
File "calibre-web/cps/web.py", line 3868, in upload
"<a href=\"" + url_for('show_book', book_id=book.id) + "\">" + uploadText + "</a>")
File "calibre-web/cps/worker.py", line 406, in add_upload
self.UIqueue[self.current]['formStarttime'] = self.queue[self.current]['starttime']
IndexError: list index out of range
|
IndexError
|
def _convert_ebook_format(self):
error_message = None
self.doLock.acquire()
index = self.current
self.doLock.release()
file_path = self.queue[index]["file_path"]
bookid = self.queue[index]["bookid"]
format_old_ext = "." + self.queue[index]["settings"]["old_book_format"].lower()
format_new_ext = "." + self.queue[index]["settings"]["new_book_format"].lower()
# check to see if destination format already exists -
# if it does - mark the conversion task as complete and return a success
# this will allow send to kindle workflow to continue to work
if os.path.isfile(file_path + format_new_ext):
log.info("Book id %d already converted to %s", bookid, format_new_ext)
cur_book = db.session.query(db.Books).filter(db.Books.id == bookid).first()
self.queue[index]["path"] = file_path
self.queue[index]["title"] = cur_book.title
self._handleSuccess()
return file_path + format_new_ext
else:
log.info(
"Book id %d - target format of %s does not exist. Moving forward with convert.",
bookid,
format_new_ext,
)
# check if converter-executable is existing
if not os.path.exists(config.config_converterpath):
# ToDo Text is not translated
self._handleError("Convertertool %s not found" % config.config_converterpath)
return
try:
# check which converter to use kindlegen is "1"
if format_old_ext == ".epub" and format_new_ext == ".mobi":
if config.config_ebookconverter == 1:
"""if os.name == 'nt':
command = config.config_converterpath + u' "' + file_path + u'.epub"'
if sys.version_info < (3, 0):
command = command.encode(sys.getfilesystemencoding())
else:"""
command = [config.config_converterpath, file_path + ".epub"]
quotes = [1]
if config.config_ebookconverter == 2:
# Linux py2.7 encode as list without quotes no empty element for parameters
# linux py3.x no encode and as list without quotes no empty element for parameters
# windows py2.7 encode as string with quotes empty element for parameters is okay
# windows py 3.x no encode and as string with quotes empty element for parameters is okay
# separate handling for windows and linux
quotes = [1, 2]
"""if os.name == 'nt':
command = config.config_converterpath + u' "' + file_path + format_old_ext + u'" "' + \
file_path + format_new_ext + u'" ' + config.config_calibre
if sys.version_info < (3, 0):
command = command.encode(sys.getfilesystemencoding())
else:"""
command = [
config.config_converterpath,
(file_path + format_old_ext),
(file_path + format_new_ext),
]
quotes_index = 3
if config.config_calibre:
parameters = config.config_calibre.split(" ")
for param in parameters:
command.append(param)
quotes.append(quotes_index)
quotes_index += 1
p = process_open(command, quotes)
# p = subprocess.Popen(command, stdout=subprocess.PIPE, universal_newlines=True)
except OSError as e:
self._handleError(_("Ebook-converter failed: %(error)s", error=e))
return
if config.config_ebookconverter == 1:
nextline = p.communicate()[0]
# Format of error message (kindlegen translates its output texts):
# Error(prcgen):E23006: Language not recognized in metadata.The dc:Language field is mandatory.Aborting.
conv_error = re.search(r".*\(.*\):(E\d+):\s(.*)", nextline, re.MULTILINE)
# If error occoures, store error message for logfile
if conv_error:
error_message = _(
"Kindlegen failed with Error %(error)s. Message: %(message)s",
error=conv_error.group(1),
message=conv_error.group(2).strip(),
)
log.debug("convert_kindlegen: %s", nextline)
else:
while p.poll() is None:
nextline = p.stdout.readline()
if os.name == "nt" and sys.version_info < (3, 0):
nextline = nextline.decode("windows-1252")
elif os.name == "posix" and sys.version_info < (3, 0):
nextline = nextline.decode("utf-8")
log.debug(nextline.strip("\r\n"))
# parse progress string from calibre-converter
progress = re.search(r"(\d+)%\s.*", nextline)
if progress:
self.UIqueue[index]["progress"] = progress.group(1) + " %"
# process returncode
check = p.returncode
calibre_traceback = p.stderr.readlines()
for ele in calibre_traceback:
if sys.version_info < (3, 0):
ele = ele.decode("utf-8")
log.debug(ele.strip("\n"))
if not ele.startswith("Traceback") and not ele.startswith(" File"):
error_message = "Calibre failed with error: %s" % ele.strip("\n")
# kindlegen returncodes
# 0 = Info(prcgen):I1036: Mobi file built successfully
# 1 = Info(prcgen):I1037: Mobi file built with WARNINGS!
# 2 = Info(prcgen):I1038: MOBI file could not be generated because of errors!
if (check < 2 and config.config_ebookconverter == 1) or (
check == 0 and config.config_ebookconverter == 2
):
cur_book = db.session.query(db.Books).filter(db.Books.id == bookid).first()
if os.path.isfile(file_path + format_new_ext):
new_format = db.Data(
name=cur_book.data[0].name,
book_format=self.queue[index]["settings"]["new_book_format"].upper(),
book=bookid,
uncompressed_size=os.path.getsize(file_path + format_new_ext),
)
cur_book.data.append(new_format)
db.session.commit()
self.queue[index]["path"] = cur_book.path
self.queue[index]["title"] = cur_book.title
if config.config_use_google_drive:
os.remove(file_path + format_old_ext)
self._handleSuccess()
return file_path + format_new_ext
else:
error_message = format_new_ext.upper() + " format not found on disk"
log.info("ebook converter failed with error while converting book")
if not error_message:
error_message = "Ebook converter failed with unknown error"
self._handleError(error_message)
return
|
def _convert_ebook_format(self):
error_message = None
doLock = threading.Lock()
doLock.acquire()
index = self.current
doLock.release()
file_path = self.queue[index]["file_path"]
bookid = self.queue[index]["bookid"]
format_old_ext = "." + self.queue[index]["settings"]["old_book_format"].lower()
format_new_ext = "." + self.queue[index]["settings"]["new_book_format"].lower()
# check to see if destination format already exists -
# if it does - mark the conversion task as complete and return a success
# this will allow send to kindle workflow to continue to work
if os.path.isfile(file_path + format_new_ext):
log.info("Book id %d already converted to %s", bookid, format_new_ext)
cur_book = db.session.query(db.Books).filter(db.Books.id == bookid).first()
self.queue[index]["path"] = file_path
self.queue[index]["title"] = cur_book.title
self._handleSuccess()
return file_path + format_new_ext
else:
log.info(
"Book id %d - target format of %s does not exist. Moving forward with convert.",
bookid,
format_new_ext,
)
# check if converter-executable is existing
if not os.path.exists(config.config_converterpath):
# ToDo Text is not translated
self._handleError("Convertertool %s not found" % config.config_converterpath)
return
try:
# check which converter to use kindlegen is "1"
if format_old_ext == ".epub" and format_new_ext == ".mobi":
if config.config_ebookconverter == 1:
"""if os.name == 'nt':
command = config.config_converterpath + u' "' + file_path + u'.epub"'
if sys.version_info < (3, 0):
command = command.encode(sys.getfilesystemencoding())
else:"""
command = [config.config_converterpath, file_path + ".epub"]
quotes = [1]
if config.config_ebookconverter == 2:
# Linux py2.7 encode as list without quotes no empty element for parameters
# linux py3.x no encode and as list without quotes no empty element for parameters
# windows py2.7 encode as string with quotes empty element for parameters is okay
# windows py 3.x no encode and as string with quotes empty element for parameters is okay
# separate handling for windows and linux
quotes = [1, 2]
"""if os.name == 'nt':
command = config.config_converterpath + u' "' + file_path + format_old_ext + u'" "' + \
file_path + format_new_ext + u'" ' + config.config_calibre
if sys.version_info < (3, 0):
command = command.encode(sys.getfilesystemencoding())
else:"""
command = [
config.config_converterpath,
(file_path + format_old_ext),
(file_path + format_new_ext),
]
quotes_index = 3
if config.config_calibre:
parameters = config.config_calibre.split(" ")
for param in parameters:
command.append(param)
quotes.append(quotes_index)
quotes_index += 1
p = process_open(command, quotes)
# p = subprocess.Popen(command, stdout=subprocess.PIPE, universal_newlines=True)
except OSError as e:
self._handleError(_("Ebook-converter failed: %(error)s", error=e))
return
if config.config_ebookconverter == 1:
nextline = p.communicate()[0]
# Format of error message (kindlegen translates its output texts):
# Error(prcgen):E23006: Language not recognized in metadata.The dc:Language field is mandatory.Aborting.
conv_error = re.search(r".*\(.*\):(E\d+):\s(.*)", nextline, re.MULTILINE)
# If error occoures, store error message for logfile
if conv_error:
error_message = _(
"Kindlegen failed with Error %(error)s. Message: %(message)s",
error=conv_error.group(1),
message=conv_error.group(2).strip(),
)
log.debug("convert_kindlegen: %s", nextline)
else:
while p.poll() is None:
nextline = p.stdout.readline()
if os.name == "nt" and sys.version_info < (3, 0):
nextline = nextline.decode("windows-1252")
elif os.name == "posix" and sys.version_info < (3, 0):
nextline = nextline.decode("utf-8")
log.debug(nextline.strip("\r\n"))
# parse progress string from calibre-converter
progress = re.search(r"(\d+)%\s.*", nextline)
if progress:
self.UIqueue[index]["progress"] = progress.group(1) + " %"
# process returncode
check = p.returncode
calibre_traceback = p.stderr.readlines()
for ele in calibre_traceback:
if sys.version_info < (3, 0):
ele = ele.decode("utf-8")
log.debug(ele.strip("\n"))
if not ele.startswith("Traceback") and not ele.startswith(" File"):
error_message = "Calibre failed with error: %s" % ele.strip("\n")
# kindlegen returncodes
# 0 = Info(prcgen):I1036: Mobi file built successfully
# 1 = Info(prcgen):I1037: Mobi file built with WARNINGS!
# 2 = Info(prcgen):I1038: MOBI file could not be generated because of errors!
if (check < 2 and config.config_ebookconverter == 1) or (
check == 0 and config.config_ebookconverter == 2
):
cur_book = db.session.query(db.Books).filter(db.Books.id == bookid).first()
if os.path.isfile(file_path + format_new_ext):
new_format = db.Data(
name=cur_book.data[0].name,
book_format=self.queue[index]["settings"]["new_book_format"].upper(),
book=bookid,
uncompressed_size=os.path.getsize(file_path + format_new_ext),
)
cur_book.data.append(new_format)
db.session.commit()
self.queue[index]["path"] = cur_book.path
self.queue[index]["title"] = cur_book.title
if config.config_use_google_drive:
os.remove(file_path + format_old_ext)
self._handleSuccess()
return file_path + format_new_ext
else:
error_message = format_new_ext.upper() + " format not found on disk"
log.info("ebook converter failed with error while converting book")
if not error_message:
error_message = "Ebook converter failed with unknown error"
self._handleError(error_message)
return
|
https://github.com/janeczku/calibre-web/issues/890
|
Traceback (most recent call last):
File "calibre-web/vendor/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "calibre-web/vendor/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "calibre-web/vendor/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "calibre-web/cps/web.py", line 348, in decorated_view
return func(*args, **kwargs)
File "calibre-web/cps/web.py", line 490, in inner
return f(*args, **kwargs)
File "calibre-web/cps/web.py", line 3868, in upload
"<a href=\"" + url_for('show_book', book_id=book.id) + "\">" + uploadText + "</a>")
File "calibre-web/cps/worker.py", line 406, in add_upload
self.UIqueue[self.current]['formStarttime'] = self.queue[self.current]['starttime']
IndexError: list index out of range
|
IndexError
|
def _send_raw_email(self):
self.doLock.acquire()
index = self.current
self.doLock.release()
self.queue[index]["starttime"] = datetime.now()
self.UIqueue[index]["formStarttime"] = self.queue[index]["starttime"]
self.UIqueue[index]["stat"] = STAT_STARTED
obj = self.queue[index]
# create MIME message
msg = MIMEMultipart()
msg["Subject"] = self.queue[index]["subject"]
msg["Message-Id"] = make_msgid("calibre-web")
msg["Date"] = formatdate(localtime=True)
text = self.queue[index]["text"]
msg.attach(MIMEText(text.encode("UTF-8"), "plain", "UTF-8"))
if obj["attachment"]:
result = get_attachment(obj["filepath"], obj["attachment"])
if result:
msg.attach(result)
else:
self._handleError("Attachment not found")
return
msg["From"] = obj["settings"]["mail_from"]
msg["To"] = obj["recipent"]
use_ssl = int(obj["settings"].get("mail_use_ssl", 0))
try:
# convert MIME message to string
fp = StringIO()
gen = Generator(fp, mangle_from_=False)
gen.flatten(msg)
msg = fp.getvalue()
# send email
timeout = 600 # set timeout to 5mins
# redirect output to logfile on python2 pn python3 debugoutput is caught with overwritten
# _print_debug function
if sys.version_info < (3, 0):
org_smtpstderr = smtplib.stderr
smtplib.stderr = logger.StderrLogger("worker.smtp")
if use_ssl == 2:
self.asyncSMTP = email_SSL(
obj["settings"]["mail_server"], obj["settings"]["mail_port"], timeout
)
else:
self.asyncSMTP = email(
obj["settings"]["mail_server"], obj["settings"]["mail_port"], timeout
)
# link to logginglevel
if logger.is_debug_enabled():
self.asyncSMTP.set_debuglevel(1)
if use_ssl == 1:
self.asyncSMTP.starttls()
if obj["settings"]["mail_password"]:
self.asyncSMTP.login(
str(obj["settings"]["mail_login"]),
str(obj["settings"]["mail_password"]),
)
self.asyncSMTP.sendmail(obj["settings"]["mail_from"], obj["recipent"], msg)
self.asyncSMTP.quit()
self._handleSuccess()
if sys.version_info < (3, 0):
smtplib.stderr = org_smtpstderr
except MemoryError as e:
self._handleError("Error sending email: " + e.message)
return None
except (smtplib.SMTPException, smtplib.SMTPAuthenticationError) as e:
if hasattr(e, "smtp_error"):
text = e.smtp_error.decode("utf-8").replace("\n", ". ")
elif hasattr(e, "message"):
text = e.message
else:
text = ""
self._handleError("Error sending email: " + text)
return None
except socket.error as e:
self._handleError("Error sending email: " + e.strerror)
return None
|
def _send_raw_email(self):
doLock = threading.Lock()
doLock.acquire()
index = self.current
doLock.release()
self.queue[index]["starttime"] = datetime.now()
self.UIqueue[index]["formStarttime"] = self.queue[index]["starttime"]
self.UIqueue[index]["stat"] = STAT_STARTED
obj = self.queue[index]
# create MIME message
msg = MIMEMultipart()
msg["Subject"] = self.queue[index]["subject"]
msg["Message-Id"] = make_msgid("calibre-web")
msg["Date"] = formatdate(localtime=True)
text = self.queue[index]["text"]
msg.attach(MIMEText(text.encode("UTF-8"), "plain", "UTF-8"))
if obj["attachment"]:
result = get_attachment(obj["filepath"], obj["attachment"])
if result:
msg.attach(result)
else:
self._handleError("Attachment not found")
return
msg["From"] = obj["settings"]["mail_from"]
msg["To"] = obj["recipent"]
use_ssl = int(obj["settings"].get("mail_use_ssl", 0))
try:
# convert MIME message to string
fp = StringIO()
gen = Generator(fp, mangle_from_=False)
gen.flatten(msg)
msg = fp.getvalue()
# send email
timeout = 600 # set timeout to 5mins
# redirect output to logfile on python2 pn python3 debugoutput is caught with overwritten
# _print_debug function
if sys.version_info < (3, 0):
org_smtpstderr = smtplib.stderr
smtplib.stderr = logger.StderrLogger("worker.smtp")
if use_ssl == 2:
self.asyncSMTP = email_SSL(
obj["settings"]["mail_server"], obj["settings"]["mail_port"], timeout
)
else:
self.asyncSMTP = email(
obj["settings"]["mail_server"], obj["settings"]["mail_port"], timeout
)
# link to logginglevel
if logger.is_debug_enabled():
self.asyncSMTP.set_debuglevel(1)
if use_ssl == 1:
self.asyncSMTP.starttls()
if obj["settings"]["mail_password"]:
self.asyncSMTP.login(
str(obj["settings"]["mail_login"]),
str(obj["settings"]["mail_password"]),
)
self.asyncSMTP.sendmail(obj["settings"]["mail_from"], obj["recipent"], msg)
self.asyncSMTP.quit()
self._handleSuccess()
if sys.version_info < (3, 0):
smtplib.stderr = org_smtpstderr
except MemoryError as e:
self._handleError("Error sending email: " + e.message)
return None
except (smtplib.SMTPException, smtplib.SMTPAuthenticationError) as e:
if hasattr(e, "smtp_error"):
text = e.smtp_error.decode("utf-8").replace("\n", ". ")
elif hasattr(e, "message"):
text = e.message
else:
text = ""
self._handleError("Error sending email: " + text)
return None
except socket.error as e:
self._handleError("Error sending email: " + e.strerror)
return None
|
https://github.com/janeczku/calibre-web/issues/890
|
Traceback (most recent call last):
File "calibre-web/vendor/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "calibre-web/vendor/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "calibre-web/vendor/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "calibre-web/cps/web.py", line 348, in decorated_view
return func(*args, **kwargs)
File "calibre-web/cps/web.py", line 490, in inner
return f(*args, **kwargs)
File "calibre-web/cps/web.py", line 3868, in upload
"<a href=\"" + url_for('show_book', book_id=book.id) + "\">" + uploadText + "</a>")
File "calibre-web/cps/worker.py", line 406, in add_upload
self.UIqueue[self.current]['formStarttime'] = self.queue[self.current]['starttime']
IndexError: list index out of range
|
IndexError
|
def _handleError(self, error_message):
log.error(error_message)
self.doLock.acquire()
index = self.current
self.doLock.release()
self.UIqueue[index]["stat"] = STAT_FAIL
self.UIqueue[index]["progress"] = "100 %"
self.UIqueue[index]["formRuntime"] = (
datetime.now() - self.queue[self.current]["starttime"]
)
self.UIqueue[index]["message"] = error_message
|
def _handleError(self, error_message):
log.error(error_message)
doLock = threading.Lock()
doLock.acquire()
index = self.current
doLock.release()
self.UIqueue[index]["stat"] = STAT_FAIL
self.UIqueue[index]["progress"] = "100 %"
self.UIqueue[index]["formRuntime"] = (
datetime.now() - self.queue[self.current]["starttime"]
)
self.UIqueue[index]["message"] = error_message
|
https://github.com/janeczku/calibre-web/issues/890
|
Traceback (most recent call last):
File "calibre-web/vendor/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "calibre-web/vendor/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "calibre-web/vendor/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "calibre-web/cps/web.py", line 348, in decorated_view
return func(*args, **kwargs)
File "calibre-web/cps/web.py", line 490, in inner
return f(*args, **kwargs)
File "calibre-web/cps/web.py", line 3868, in upload
"<a href=\"" + url_for('show_book', book_id=book.id) + "\">" + uploadText + "</a>")
File "calibre-web/cps/worker.py", line 406, in add_upload
self.UIqueue[self.current]['formStarttime'] = self.queue[self.current]['starttime']
IndexError: list index out of range
|
IndexError
|
def _handleSuccess(self):
self.doLock.acquire()
index = self.current
self.doLock.release()
self.UIqueue[index]["stat"] = STAT_FINISH_SUCCESS
self.UIqueue[index]["progress"] = "100 %"
self.UIqueue[index]["formRuntime"] = (
datetime.now() - self.queue[self.current]["starttime"]
)
|
def _handleSuccess(self):
doLock = threading.Lock()
doLock.acquire()
index = self.current
doLock.release()
self.UIqueue[index]["stat"] = STAT_FINISH_SUCCESS
self.UIqueue[index]["progress"] = "100 %"
self.UIqueue[index]["formRuntime"] = (
datetime.now() - self.queue[self.current]["starttime"]
)
|
https://github.com/janeczku/calibre-web/issues/890
|
Traceback (most recent call last):
File "calibre-web/vendor/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "calibre-web/vendor/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "calibre-web/vendor/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "calibre-web/vendor/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "calibre-web/cps/web.py", line 348, in decorated_view
return func(*args, **kwargs)
File "calibre-web/cps/web.py", line 490, in inner
return f(*args, **kwargs)
File "calibre-web/cps/web.py", line 3868, in upload
"<a href=\"" + url_for('show_book', book_id=book.id) + "\">" + uploadText + "</a>")
File "calibre-web/cps/worker.py", line 406, in add_upload
self.UIqueue[self.current]['formStarttime'] = self.queue[self.current]['starttime']
IndexError: list index out of range
|
IndexError
|
def remove_from_shelf(shelf_id, book_id):
xhr = request.headers.get("X-Requested-With") == "XMLHttpRequest"
shelf = ub.session.query(ub.Shelf).filter(ub.Shelf.id == shelf_id).first()
if shelf is None:
log.error("Invalid shelf specified: %s", shelf_id)
if not xhr:
return redirect(url_for("web.index"))
return "Invalid shelf specified", 400
# if shelf is public and use is allowed to edit shelfs, or if shelf is private and user is owner
# allow editing shelfs
# result shelf public user allowed user owner
# false 1 0 x
# true 1 1 x
# true 0 x 1
# false 0 x 0
if (not shelf.is_public and shelf.user_id == int(current_user.id)) or (
shelf.is_public and current_user.role_edit_shelfs()
):
book_shelf = (
ub.session.query(ub.BookShelf)
.filter(ub.BookShelf.shelf == shelf_id, ub.BookShelf.book_id == book_id)
.first()
)
if book_shelf is None:
log.error("Book %s already removed from %s", book_id, shelf)
if not xhr:
return redirect(url_for("web.index"))
return "Book already removed from shelf", 410
ub.session.delete(book_shelf)
ub.session.commit()
if not xhr:
flash(
_("Book has been removed from shelf: %(sname)s", sname=shelf.name),
category="success",
)
if "HTTP_REFERER" in request.environ:
return redirect(request.environ["HTTP_REFERER"])
else:
return redirect(url_for("web.index"))
return "", 204
else:
log.error("User %s not allowed to remove a book from %s", current_user, shelf)
if not xhr:
flash(
_(
"Sorry you are not allowed to remove a book from this shelf: %(sname)s",
sname=shelf.name,
),
category="error",
)
return redirect(url_for("web.index"))
return (
"Sorry you are not allowed to remove a book from this shelf: %s"
% shelf.name,
403,
)
|
def remove_from_shelf(shelf_id, book_id):
xhr = request.headers.get("X-Requested-With") == "XMLHttpRequest"
shelf = ub.session.query(ub.Shelf).filter(ub.Shelf.id == shelf_id).first()
if shelf is None:
log.error("Invalid shelf specified: %s", shelf_id)
if not xhr:
return redirect(url_for("web.index"))
return "Invalid shelf specified", 400
# if shelf is public and use is allowed to edit shelfs, or if shelf is private and user is owner
# allow editing shelfs
# result shelf public user allowed user owner
# false 1 0 x
# true 1 1 x
# true 0 x 1
# false 0 x 0
if (not shelf.is_public and shelf.user_id == int(current_user.id)) or (
shelf.is_public and current_user.role_edit_shelfs()
):
book_shelf = (
ub.session.query(ub.BookShelf)
.filter(ub.BookShelf.shelf == shelf_id, ub.BookShelf.book_id == book_id)
.first()
)
if book_shelf is None:
log.error("Book %s already removed from %s", book_id, shelf)
if not xhr:
return redirect(url_for("web.index"))
return "Book already removed from shelf", 410
ub.session.delete(book_shelf)
ub.session.commit()
if not xhr:
flash(
_("Book has been removed from shelf: %(sname)s", sname=shelf.name),
category="success",
)
return redirect(request.environ["HTTP_REFERER"])
return "", 204
else:
log.error("User %s not allowed to remove a book from %s", current_user, shelf)
if not xhr:
flash(
_(
"Sorry you are not allowed to remove a book from this shelf: %(sname)s",
sname=shelf.name,
),
category="error",
)
return redirect(url_for("web.index"))
return (
"Sorry you are not allowed to remove a book from this shelf: %s"
% shelf.name,
403,
)
|
https://github.com/janeczku/calibre-web/issues/1199
|
Internal Server Error
500 Internal Server Error: The server encountered an internal error and was unable to complete your request. Either the server is overloaded or there is an error in the application.
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2446, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1951, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1820, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.6/dist-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1949, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1935, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 261, in decorated_view
return func(*args, **kwargs)
File "/app/calibre-web/cps/web.py", line 217, in inner
return f(*args, **kwargs)
File "/app/calibre-web/cps/web.py", line 1071, in send_to_kindle
return redirect(request.environ["HTTP_REFERER"])
KeyError: 'HTTP_REFERER'
|
KeyError
|
def send_to_kindle(book_id, book_format, convert):
if not config.get_mail_server_configured():
flash(_("Please configure the SMTP mail settings first..."), category="error")
elif current_user.kindle_mail:
result = send_mail(
book_id,
book_format,
convert,
current_user.kindle_mail,
config.config_calibre_dir,
current_user.nickname,
)
if result is None:
flash(
_(
"Book successfully queued for sending to %(kindlemail)s",
kindlemail=current_user.kindle_mail,
),
category="success",
)
ub.update_download(book_id, int(current_user.id))
else:
flash(
_("There was an error sending this book: %(res)s", res=result),
category="error",
)
else:
flash(
_("Please configure your kindle e-mail address first..."), category="error"
)
if "HTTP_REFERER" in request.environ:
return redirect(request.environ["HTTP_REFERER"])
else:
return redirect(url_for("web.index"))
|
def send_to_kindle(book_id, book_format, convert):
if not config.get_mail_server_configured():
flash(_("Please configure the SMTP mail settings first..."), category="error")
elif current_user.kindle_mail:
result = send_mail(
book_id,
book_format,
convert,
current_user.kindle_mail,
config.config_calibre_dir,
current_user.nickname,
)
if result is None:
flash(
_(
"Book successfully queued for sending to %(kindlemail)s",
kindlemail=current_user.kindle_mail,
),
category="success",
)
ub.update_download(book_id, int(current_user.id))
else:
flash(
_("There was an error sending this book: %(res)s", res=result),
category="error",
)
else:
flash(
_("Please configure your kindle e-mail address first..."), category="error"
)
return redirect(request.environ["HTTP_REFERER"])
|
https://github.com/janeczku/calibre-web/issues/1199
|
Internal Server Error
500 Internal Server Error: The server encountered an internal error and was unable to complete your request. Either the server is overloaded or there is an error in the application.
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 2446, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1951, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1820, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.6/dist-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1949, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1935, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.6/dist-packages/flask_login/utils.py", line 261, in decorated_view
return func(*args, **kwargs)
File "/app/calibre-web/cps/web.py", line 217, in inner
return f(*args, **kwargs)
File "/app/calibre-web/cps/web.py", line 1071, in send_to_kindle
return redirect(request.environ["HTTP_REFERER"])
KeyError: 'HTTP_REFERER'
|
KeyError
|
def render_task_status(tasklist):
renderedtasklist = list()
for task in tasklist:
if task["user"] == current_user.nickname or current_user.role_admin():
if task["formStarttime"]:
task["starttime"] = format_datetime(
task["formStarttime"], format="short", locale=web.get_locale()
)
else:
if "starttime" not in task:
task["starttime"] = ""
if "formRuntime" not in task:
task["runtime"] = ""
else:
task["runtime"] = format_runtime(task["formRuntime"])
# localize the task status
if isinstance(task["stat"], int):
if task["stat"] == worker.STAT_WAITING:
task["status"] = _("Waiting")
elif task["stat"] == worker.STAT_FAIL:
task["status"] = _("Failed")
elif task["stat"] == worker.STAT_STARTED:
task["status"] = _("Started")
elif task["stat"] == worker.STAT_FINISH_SUCCESS:
task["status"] = _("Finished")
else:
task["status"] = _("Unknown Status")
# localize the task type
if isinstance(task["taskType"], int):
if task["taskType"] == worker.TASK_EMAIL:
task["taskMessage"] = _("E-mail: ") + task["taskMess"]
elif task["taskType"] == worker.TASK_CONVERT:
task["taskMessage"] = _("Convert: ") + task["taskMess"]
elif task["taskType"] == worker.TASK_UPLOAD:
task["taskMessage"] = _("Upload: ") + task["taskMess"]
elif task["taskType"] == worker.TASK_CONVERT_ANY:
task["taskMessage"] = _("Convert: ") + task["taskMess"]
else:
task["taskMessage"] = _("Unknown Task: ") + task["taskMess"]
renderedtasklist.append(task)
return renderedtasklist
|
def render_task_status(tasklist):
renderedtasklist = list()
for task in tasklist:
if task["user"] == current_user.nickname or current_user.role_admin():
if task["formStarttime"]:
task["starttime"] = format_datetime(
task["formStarttime"], format="short", locale=web.get_locale()
)
else:
if "starttime" not in task:
task["starttime"] = ""
task["runtime"] = format_runtime(task["formRuntime"])
# localize the task status
if isinstance(task["stat"], int):
if task["stat"] == worker.STAT_WAITING:
task["status"] = _("Waiting")
elif task["stat"] == worker.STAT_FAIL:
task["status"] = _("Failed")
elif task["stat"] == worker.STAT_STARTED:
task["status"] = _("Started")
elif task["stat"] == worker.STAT_FINISH_SUCCESS:
task["status"] = _("Finished")
else:
task["status"] = _("Unknown Status")
# localize the task type
if isinstance(task["taskType"], int):
if task["taskType"] == worker.TASK_EMAIL:
task["taskMessage"] = _("E-mail: ") + task["taskMess"]
elif task["taskType"] == worker.TASK_CONVERT:
task["taskMessage"] = _("Convert: ") + task["taskMess"]
elif task["taskType"] == worker.TASK_UPLOAD:
task["taskMessage"] = _("Upload: ") + task["taskMess"]
elif task["taskType"] == worker.TASK_CONVERT_ANY:
task["taskMessage"] = _("Convert: ") + task["taskMess"]
else:
task["taskMessage"] = _("Unknown Task: ") + task["taskMess"]
renderedtasklist.append(task)
return renderedtasklist
|
https://github.com/janeczku/calibre-web/issues/959
|
Jul 11 20:01:27 lara python3[24820]: [2019-07-11 20:01:27,270] INFO in worker: Book id 42929 - target format of .mobi does not exist. Moving forward with convert.
Jul 11 20:01:27 lara python3[24820]: [2019-07-11 20:01:27,784] DEBUG in worker: 1% Converting input to HTML...
Jul 11 20:01:27 lara python3[24820]: [2019-07-11 20:01:27,784] DEBUG in worker: InputFormatPlugin: EPUB Input running
Jul 11 20:01:27 lara python3[24820]: [2019-07-11 20:01:27,785] DEBUG in worker: on /sharedfolders/ebooks/Shamash Alidina/Mindfulness voor Dummies (42929)/Mindfulness voor Dummies - Shamash Alidina.epub
Jul 11 20:01:28 lara python3[24820]: [2019-07-11 20:01:28,047] DEBUG in worker: Found HTML cover OEBPS/Text/Section0001.html
Jul 11 20:01:28 lara python3[24820]: [2019-07-11 20:01:28,064] DEBUG in worker: Parsing all content...
Jul 11 20:01:28 lara python3[24820]: [2019-07-11 20:01:28,915] DEBUG in worker: 34% Running transforms on e-book...
Jul 11 20:01:29 lara python3[24820]: [2019-07-11 20:01:29,100] DEBUG in worker: Merging user specified metadata...
Jul 11 20:01:29 lara python3[24820]: [2019-07-11 20:01:29,101] DEBUG in worker: Detecting structure...
Jul 11 20:01:29 lara python3[24820]: [2019-07-11 20:01:29,173] DEBUG in worker: Flattening CSS and remapping font sizes...
Jul 11 20:01:33 lara python3[24820]: [2019-07-11 20:01:33,266] DEBUG in worker: Source base font size is 12.00000pt
Jul 11 20:01:34 lara python3[24820]: [2019-07-11 20:01:34,651] DEBUG in worker: Removing fake margins...
Jul 11 20:01:35 lara python3[24820]: [2019-07-11 20:01:35,409] DEBUG in worker: Cleaning up manifest...
Jul 11 20:01:35 lara python3[24820]: [2019-07-11 20:01:35,410] DEBUG in worker: Trimming unused files from manifest...
Jul 11 20:01:35 lara python3[24820]: [2019-07-11 20:01:35,475] DEBUG in worker: Trimming u'OEBPS/Text/Section0001.html' from manifest
Jul 11 20:01:35 lara python3[24820]: [2019-07-11 20:01:35,601] DEBUG in worker: Creating MOBI Output...
Jul 11 20:01:35 lara python3[24820]: [2019-07-11 20:01:35,602] DEBUG in worker: 67% Running MOBI Output plugin
Jul 11 20:01:35 lara python3[24820]: [2019-07-11 20:01:35,722] DEBUG in worker: Serializing resources...
Jul 11 20:01:36 lara python3[24820]: [2019-07-11 20:01:36,594] DEBUG in worker: Creating MOBI 6 output
Jul 11 20:01:36 lara python3[24820]: [2019-07-11 20:01:36,682] DEBUG in worker: Generating in-line TOC...
Jul 11 20:01:36 lara python3[24820]: [2019-07-11 20:01:36,693] DEBUG in worker: Applying case-transforming CSS...
Jul 11 20:01:39 lara systemd[1]: Started Session 3268 of user root.
Jul 11 20:01:44 lara python3[24820]: [2019-07-11 20:01:44,777] DEBUG in worker: Rasterizing SVG images...
Jul 11 20:01:44 lara python3[24820]: [2019-07-11 20:01:44,810] DEBUG in worker: Converting XHTML to Mobipocket markup...
Jul 11 20:01:57 lara python3[24820]: [2019-07-11 20:01:57,970] DEBUG in worker: Serializing markup content...
Jul 11 20:01:58 lara python3[24820]: [2019-07-11 20:01:58,585] DEBUG in worker: Compressing markup content...
Jul 11 20:02:01 lara python3[24820]: [2019-07-11 20:02:01,547] ERROR in app: Exception on /tasks [GET]
Jul 11 20:02:01 lara python3[24820]: Traceback (most recent call last):
Jul 11 20:02:01 lara python3[24820]: File "/home/calibre-web/calibre-web/vendor/flask/app.py", line 2311, in wsgi_app
Jul 11 20:02:01 lara python3[24820]: response = self.full_dispatch_request()
Jul 11 20:02:01 lara python3[24820]: File "/home/calibre-web/calibre-web/vendor/flask/app.py", line 1834, in full_dispatch_request
Jul 11 20:02:01 lara python3[24820]: rv = self.handle_user_exception(e)
Jul 11 20:02:01 lara python3[24820]: File "/home/calibre-web/calibre-web/vendor/flask/app.py", line 1737, in handle_user_exception
Jul 11 20:02:01 lara python3[24820]: reraise(exc_type, exc_value, tb)
Jul 11 20:02:01 lara python3[24820]: File "/home/calibre-web/calibre-web/vendor/flask/_compat.py", line 36, in reraise
Jul 11 20:02:01 lara python3[24820]: raise value
Jul 11 20:02:01 lara python3[24820]: File "/home/calibre-web/calibre-web/vendor/flask/app.py", line 1832, in full_dispatch_request
Jul 11 20:02:01 lara python3[24820]: rv = self.dispatch_request()
Jul 11 20:02:01 lara python3[24820]: File "/home/calibre-web/calibre-web/vendor/flask/app.py", line 1818, in dispatch_request
Jul 11 20:02:01 lara python3[24820]: return self.view_functions[rule.endpoint](**req.view_args)
Jul 11 20:02:01 lara python3[24820]: File "/home/calibre-web/calibre-web/vendor/flask_login/utils.py", line 261, in decorated_view
Jul 11 20:02:01 lara python3[24820]: return func(*args, **kwargs)
Jul 11 20:02:01 lara python3[24820]: File "/home/calibre-web/calibre-web/cps/web.py", line 1647, in get_tasks_status
Jul 11 20:02:01 lara python3[24820]: answer = helper.render_task_status(tasks)
Jul 11 20:02:01 lara python3[24820]: File "/home/calibre-web/calibre-web/cps/helper.py", line 608, in render_task_status
Jul 11 20:02:01 lara python3[24820]: task['runtime'] = format_runtime(task['formRuntime'])
Jul 11 20:02:01 lara python3[24820]: KeyError: 'formRuntime'
Jul 11 20:02:04 lara python3[24820]: ::ffff:192.168.100.156 - - [2019-07-11 20:02:01] "GET /tasks HTTP/1.1" 500 411 0.013245
|
KeyError
|
def read_book(book_id, book_format):
book = db.session.query(db.Books).filter(db.Books.id == book_id).first()
if not book:
flash(
_("Error opening eBook. File does not exist or file is not accessible:"),
category="error",
)
return redirect(url_for("index"))
# check if book has bookmark
lbookmark = None
if current_user.is_authenticated:
lbookmark = (
ub.session.query(ub.Bookmark)
.filter(
ub.and_(
ub.Bookmark.user_id == int(current_user.id),
ub.Bookmark.book_id == book_id,
ub.Bookmark.format == book_format.upper(),
)
)
.first()
)
if book_format.lower() == "epub":
return render_title_template(
"read.html", bookid=book_id, title=_("Read a Book"), bookmark=lbookmark
)
elif book_format.lower() == "pdf":
return render_title_template(
"readpdf.html", pdffile=book_id, title=_("Read a Book")
)
elif book_format.lower() == "txt":
return render_title_template(
"readtxt.html", txtfile=book_id, title=_("Read a Book")
)
else:
book_dir = os.path.join(config.get_main_dir, "cps", "static", str(book_id))
if not os.path.exists(book_dir):
os.mkdir(book_dir)
for fileext in ["cbr", "cbt", "cbz"]:
if book_format.lower() == fileext:
all_name = str(book_id) # + "/" + book.data[0].name + "." + fileext
# tmp_file = os.path.join(book_dir, book.data[0].name) + "." + fileext
# if not os.path.exists(all_name):
# cbr_file = os.path.join(config.config_calibre_dir, book.path, book.data[0].name) + "." + fileext
# copyfile(cbr_file, tmp_file)
return render_title_template(
"readcbr.html",
comicfile=all_name,
title=_("Read a Book"),
extension=fileext,
)
"""if rar_support == True:
extensionList = ["cbr","cbt","cbz"]
else:
extensionList = ["cbt","cbz"]
for fileext in extensionList:
if book_format.lower() == fileext:
return render_title_template('readcbr.html', comicfile=book_id,
extension=fileext, title=_(u"Read a Book"), book=book)
flash(_(u"Error opening eBook. File does not exist or file is not accessible."), category="error")
return redirect(url_for("index"))"""
flash(_("Error opening eBook. Fileformat is not supported."), category="error")
return redirect(url_for("index"))
|
def read_book(book_id, book_format):
book = db.session.query(db.Books).filter(db.Books.id == book_id).first()
if not book:
flash(
_("Error opening eBook. File does not exist or file is not accessible:"),
category="error",
)
return redirect(url_for("index"))
# check if book was downloaded before
lbookmark = None
if current_user.is_authenticated:
lbookmark = (
ub.session.query(ub.Bookmark)
.filter(
ub.and_(
ub.Bookmark.user_id == int(current_user.id),
ub.Bookmark.book_id == book_id,
ub.Bookmark.format == book_format.upper(),
)
)
.first()
)
if book_format.lower() == "epub":
return render_title_template(
"read.html", bookid=book_id, title=_("Read a Book"), bookmark=lbookmark
)
elif book_format.lower() == "pdf":
return render_title_template(
"readpdf.html", pdffile=book_id, title=_("Read a Book")
)
elif book_format.lower() == "txt":
return render_title_template(
"readtxt.html", txtfile=book_id, title=_("Read a Book")
)
else:
book_dir = os.path.join(config.get_main_dir, "cps", "static", str(book_id))
if not os.path.exists(book_dir):
os.mkdir(book_dir)
for fileext in ["cbr", "cbt", "cbz"]:
if book_format.lower() == fileext:
all_name = str(book_id) # + "/" + book.data[0].name + "." + fileext
# tmp_file = os.path.join(book_dir, book.data[0].name) + "." + fileext
# if not os.path.exists(all_name):
# cbr_file = os.path.join(config.config_calibre_dir, book.path, book.data[0].name) + "." + fileext
# copyfile(cbr_file, tmp_file)
return render_title_template(
"readcbr.html",
comicfile=all_name,
title=_("Read a Book"),
extension=fileext,
)
"""if rar_support == True:
extensionList = ["cbr","cbt","cbz"]
else:
extensionList = ["cbt","cbz"]
for fileext in extensionList:
if book_format.lower() == fileext:
return render_title_template('readcbr.html', comicfile=book_id,
extension=fileext, title=_(u"Read a Book"), book=book)
flash(_(u"Error opening eBook. File does not exist or file is not accessible."), category="error")
return redirect(url_for("index"))"""
|
https://github.com/janeczku/calibre-web/issues/897
|
[2019-04-29 08:55:11,436] ERROR in app: Exception on /read/4/zip [GET]
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1816, in full_dispatch_request
return self.finalize_request(rv)
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1831, in finalize_request
response = self.make_response(rv)
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1957, in make_response
'The view function did not return a valid response. The'
TypeError: The view function did not return a valid response. The function either returned None or ended without a return statement.
|
TypeError
|
def check_read_formats(entry):
EXTENSIONS_READER = {"TXT", "PDF", "EPUB", "CBZ", "CBT", "CBR"}
bookformats = list()
if len(entry.data):
for ele in iter(entry.data):
if ele.format in EXTENSIONS_READER:
bookformats.append(ele.format.lower())
return bookformats
|
def check_read_formats(entry):
EXTENSIONS_READER = {"TXT", "PDF", "EPUB", "ZIP", "CBZ", "TAR", "CBT", "RAR", "CBR"}
bookformats = list()
if len(entry.data):
for ele in iter(entry.data):
if ele.format in EXTENSIONS_READER:
bookformats.append(ele.format.lower())
return bookformats
|
https://github.com/janeczku/calibre-web/issues/897
|
[2019-04-29 08:55:11,436] ERROR in app: Exception on /read/4/zip [GET]
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1816, in full_dispatch_request
return self.finalize_request(rv)
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1831, in finalize_request
response = self.make_response(rv)
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1957, in make_response
'The view function did not return a valid response. The'
TypeError: The view function did not return a valid response. The function either returned None or ended without a return statement.
|
TypeError
|
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime)):
return obj.isoformat()
if isinstance(obj, (timedelta)):
return {
"__type__": "timedelta",
"days": obj.days,
"seconds": obj.seconds,
"microseconds": obj.microseconds,
}
raise TypeError("Type %s not serializable" % type(obj))
|
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime)):
return obj.isoformat()
raise TypeError("Type %s not serializable" % type(obj))
|
https://github.com/janeczku/calibre-web/issues/954
|
[2019-06-28 22:37:50,052] ERROR in app: Exception on /tasks [GET]
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/lib/python2.7/site-packages/flask_login/utils.py", line 261, in decorated_view
return func(*args, **kwargs)
File "/calibre-web/app/cps/web.py", line 1646, in get_tasks_status
tasks=helper.global_WorkerThread.get_taskstatus()
File "/calibre-web/app/cps/worker.py", line 225, in get_taskstatus
datetime.now() - self.queue[self.current]['starttime'])
File "/calibre-web/app/cps/worker.py", line 489, in _formatRuntime
if int(v) > 0:
ValueError: invalid literal for int() with base 10: '1 day, 5'
::ffff:172.17.0.1 - - [2019-06-28 22:37:50] "GET /tasks HTTP/1.1" 500 411 0.011915
|
ValueError
|
def render_task_status(tasklist):
renderedtasklist = list()
for task in tasklist:
if task["user"] == current_user.nickname or current_user.role_admin():
if task["formStarttime"]:
task["starttime"] = format_datetime(
task["formStarttime"], format="short", locale=web.get_locale()
)
else:
if "starttime" not in task:
task["starttime"] = ""
task["runtime"] = format_runtime(task["formRuntime"])
# localize the task status
if isinstance(task["stat"], int):
if task["stat"] == worker.STAT_WAITING:
task["status"] = _("Waiting")
elif task["stat"] == worker.STAT_FAIL:
task["status"] = _("Failed")
elif task["stat"] == worker.STAT_STARTED:
task["status"] = _("Started")
elif task["stat"] == worker.STAT_FINISH_SUCCESS:
task["status"] = _("Finished")
else:
task["status"] = _("Unknown Status")
# localize the task type
if isinstance(task["taskType"], int):
if task["taskType"] == worker.TASK_EMAIL:
task["taskMessage"] = _("E-mail: ") + task["taskMess"]
elif task["taskType"] == worker.TASK_CONVERT:
task["taskMessage"] = _("Convert: ") + task["taskMess"]
elif task["taskType"] == worker.TASK_UPLOAD:
task["taskMessage"] = _("Upload: ") + task["taskMess"]
elif task["taskType"] == worker.TASK_CONVERT_ANY:
task["taskMessage"] = _("Convert: ") + task["taskMess"]
else:
task["taskMessage"] = _("Unknown Task: ") + task["taskMess"]
renderedtasklist.append(task)
return renderedtasklist
|
def render_task_status(tasklist):
renderedtasklist = list()
for task in tasklist:
if task["user"] == current_user.nickname or current_user.role_admin():
if task["formStarttime"]:
task["starttime"] = format_datetime(
task["formStarttime"], format="short", locale=web.get_locale()
)
else:
if "starttime" not in task:
task["starttime"] = ""
# localize the task status
if isinstance(task["stat"], int):
if task["stat"] == worker.STAT_WAITING:
task["status"] = _("Waiting")
elif task["stat"] == worker.STAT_FAIL:
task["status"] = _("Failed")
elif task["stat"] == worker.STAT_STARTED:
task["status"] = _("Started")
elif task["stat"] == worker.STAT_FINISH_SUCCESS:
task["status"] = _("Finished")
else:
task["status"] = _("Unknown Status")
# localize the task type
if isinstance(task["taskType"], int):
if task["taskType"] == worker.TASK_EMAIL:
task["taskMessage"] = _("E-mail: ") + task["taskMess"]
elif task["taskType"] == worker.TASK_CONVERT:
task["taskMessage"] = _("Convert: ") + task["taskMess"]
elif task["taskType"] == worker.TASK_UPLOAD:
task["taskMessage"] = _("Upload: ") + task["taskMess"]
elif task["taskType"] == worker.TASK_CONVERT_ANY:
task["taskMessage"] = _("Convert: ") + task["taskMess"]
else:
task["taskMessage"] = _("Unknown Task: ") + task["taskMess"]
renderedtasklist.append(task)
return renderedtasklist
|
https://github.com/janeczku/calibre-web/issues/954
|
[2019-06-28 22:37:50,052] ERROR in app: Exception on /tasks [GET]
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/lib/python2.7/site-packages/flask_login/utils.py", line 261, in decorated_view
return func(*args, **kwargs)
File "/calibre-web/app/cps/web.py", line 1646, in get_tasks_status
tasks=helper.global_WorkerThread.get_taskstatus()
File "/calibre-web/app/cps/worker.py", line 225, in get_taskstatus
datetime.now() - self.queue[self.current]['starttime'])
File "/calibre-web/app/cps/worker.py", line 489, in _formatRuntime
if int(v) > 0:
ValueError: invalid literal for int() with base 10: '1 day, 5'
::ffff:172.17.0.1 - - [2019-06-28 22:37:50] "GET /tasks HTTP/1.1" 500 411 0.011915
|
ValueError
|
def new_user():
content = ub.User()
languages = speaking_language()
translations = [LC("en")] + babel.list_translations()
if request.method == "POST":
to_save = request.form.to_dict()
content.default_language = to_save["default_language"]
content.mature_content = "show_mature_content" in to_save
if "locale" in to_save:
content.locale = to_save["locale"]
content.sidebar_view = 0
if "show_random" in to_save:
content.sidebar_view += ub.SIDEBAR_RANDOM
if "show_language" in to_save:
content.sidebar_view += ub.SIDEBAR_LANGUAGE
if "show_series" in to_save:
content.sidebar_view += ub.SIDEBAR_SERIES
if "show_category" in to_save:
content.sidebar_view += ub.SIDEBAR_CATEGORY
if "show_hot" in to_save:
content.sidebar_view += ub.SIDEBAR_HOT
if "show_read_and_unread" in to_save:
content.sidebar_view += ub.SIDEBAR_READ_AND_UNREAD
if "show_best_rated" in to_save:
content.sidebar_view += ub.SIDEBAR_BEST_RATED
if "show_author" in to_save:
content.sidebar_view += ub.SIDEBAR_AUTHOR
if "show_publisher" in to_save:
content.sidebar_view += ub.SIDEBAR_PUBLISHER
if "show_detail_random" in to_save:
content.sidebar_view += ub.DETAIL_RANDOM
if "show_sorted" in to_save:
content.sidebar_view += ub.SIDEBAR_SORTED
if "show_recent" in to_save:
content.sidebar_view += ub.SIDEBAR_RECENT
content.role = 0
if "admin_role" in to_save:
content.role = content.role + ub.ROLE_ADMIN
if "download_role" in to_save:
content.role = content.role + ub.ROLE_DOWNLOAD
if "upload_role" in to_save:
content.role = content.role + ub.ROLE_UPLOAD
if "edit_role" in to_save:
content.role = content.role + ub.ROLE_EDIT
if "delete_role" in to_save:
content.role = content.role + ub.ROLE_DELETE_BOOKS
if "passwd_role" in to_save:
content.role = content.role + ub.ROLE_PASSWD
if "edit_shelf_role" in to_save:
content.role = content.role + ub.ROLE_EDIT_SHELFS
if not to_save["nickname"] or not to_save["email"] or not to_save["password"]:
flash(_("Please fill out all fields!"), category="error")
return render_title_template(
"user_edit.html",
new_user=1,
content=content,
translations=translations,
title=_("Add new user"),
)
content.password = generate_password_hash(to_save["password"])
existing_user = (
ub.session.query(ub.User)
.filter(func.lower(ub.User.nickname) == to_save["nickname"].lower())
.first()
)
existing_email = (
ub.session.query(ub.User)
.filter(ub.User.email == to_save["email"].lower())
.first()
)
if not existing_user and not existing_email:
content.nickname = to_save["nickname"]
if config.config_public_reg and not check_valid_domain(to_save["email"]):
flash(_("E-mail is not from valid domain"), category="error")
return render_title_template(
"user_edit.html",
new_user=1,
content=content,
translations=translations,
title=_("Add new user"),
)
else:
content.email = to_save["email"]
else:
flash(
_("Found an existing account for this e-mail address or nickname."),
category="error",
)
return render_title_template(
"user_edit.html",
new_user=1,
content=content,
translations=translations,
languages=languages,
title=_("Add new user"),
page="newuser",
)
try:
ub.session.add(content)
ub.session.commit()
flash(
_("User '%(user)s' created", user=content.nickname), category="success"
)
return redirect(url_for("admin"))
except IntegrityError:
ub.session.rollback()
flash(
_("Found an existing account for this e-mail address or nickname."),
category="error",
)
else:
content.role = config.config_default_role
content.sidebar_view = config.config_default_show
content.mature_content = bool(config.config_default_show & ub.MATURE_CONTENT)
return render_title_template(
"user_edit.html",
new_user=1,
content=content,
translations=translations,
languages=languages,
title=_("Add new user"),
page="newuser",
)
|
def new_user():
content = ub.User()
languages = speaking_language()
translations = [LC("en")] + babel.list_translations()
if request.method == "POST":
to_save = request.form.to_dict()
content.default_language = to_save["default_language"]
content.mature_content = "show_mature_content" in to_save
if "locale" in to_save:
content.locale = to_save["locale"]
content.sidebar_view = 0
if "show_random" in to_save:
content.sidebar_view += ub.SIDEBAR_RANDOM
if "show_language" in to_save:
content.sidebar_view += ub.SIDEBAR_LANGUAGE
if "show_series" in to_save:
content.sidebar_view += ub.SIDEBAR_SERIES
if "show_category" in to_save:
content.sidebar_view += ub.SIDEBAR_CATEGORY
if "show_hot" in to_save:
content.sidebar_view += ub.SIDEBAR_HOT
if "show_read_and_unread" in to_save:
content.sidebar_view += ub.SIDEBAR_READ_AND_UNREAD
if "show_best_rated" in to_save:
content.sidebar_view += ub.SIDEBAR_BEST_RATED
if "show_author" in to_save:
content.sidebar_view += ub.SIDEBAR_AUTHOR
if "show_publisher" in to_save:
content.sidebar_view += ub.SIDEBAR_PUBLISHER
if "show_detail_random" in to_save:
content.sidebar_view += ub.DETAIL_RANDOM
if "show_sorted" in to_save:
content.sidebar_view += ub.SIDEBAR_SORTED
if "show_recent" in to_save:
content.sidebar_view += ub.SIDEBAR_RECENT
content.role = 0
if "admin_role" in to_save:
content.role = content.role + ub.ROLE_ADMIN
if "download_role" in to_save:
content.role = content.role + ub.ROLE_DOWNLOAD
if "upload_role" in to_save:
content.role = content.role + ub.ROLE_UPLOAD
if "edit_role" in to_save:
content.role = content.role + ub.ROLE_EDIT
if "delete_role" in to_save:
content.role = content.role + ub.ROLE_DELETE_BOOKS
if "passwd_role" in to_save:
content.role = content.role + ub.ROLE_PASSWD
if "edit_shelf_role" in to_save:
content.role = content.role + ub.ROLE_EDIT_SHELFS
if not to_save["nickname"] or not to_save["email"] or not to_save["password"]:
flash(_("Please fill out all fields!"), category="error")
return render_title_template(
"user_edit.html",
new_user=1,
content=content,
translations=translations,
title=_("Add new user"),
)
content.password = generate_password_hash(to_save["password"])
content.nickname = to_save["nickname"]
if config.config_public_reg and not check_valid_domain(to_save["email"]):
flash(_("E-mail is not from valid domain"), category="error")
return render_title_template(
"user_edit.html",
new_user=1,
content=content,
translations=translations,
title=_("Add new user"),
)
else:
content.email = to_save["email"]
try:
ub.session.add(content)
ub.session.commit()
flash(
_("User '%(user)s' created", user=content.nickname), category="success"
)
return redirect(url_for("admin"))
except IntegrityError:
ub.session.rollback()
flash(
_("Found an existing account for this e-mail address or nickname."),
category="error",
)
else:
content.role = config.config_default_role
content.sidebar_view = config.config_default_show
content.mature_content = bool(config.config_default_show & ub.MATURE_CONTENT)
return render_title_template(
"user_edit.html",
new_user=1,
content=content,
translations=translations,
languages=languages,
title=_("Add new user"),
page="newuser",
)
|
https://github.com/janeczku/calibre-web/issues/954
|
[2019-06-28 22:37:50,052] ERROR in app: Exception on /tasks [GET]
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/lib/python2.7/site-packages/flask_login/utils.py", line 261, in decorated_view
return func(*args, **kwargs)
File "/calibre-web/app/cps/web.py", line 1646, in get_tasks_status
tasks=helper.global_WorkerThread.get_taskstatus()
File "/calibre-web/app/cps/worker.py", line 225, in get_taskstatus
datetime.now() - self.queue[self.current]['starttime'])
File "/calibre-web/app/cps/worker.py", line 489, in _formatRuntime
if int(v) > 0:
ValueError: invalid literal for int() with base 10: '1 day, 5'
::ffff:172.17.0.1 - - [2019-06-28 22:37:50] "GET /tasks HTTP/1.1" 500 411 0.011915
|
ValueError
|
def edit_user(user_id):
content = ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first() # type: ub.User
downloads = list()
languages = speaking_language()
translations = babel.list_translations() + [LC("en")]
for book in content.downloads:
downloadbook = (
db.session.query(db.Books).filter(db.Books.id == book.book_id).first()
)
if downloadbook:
downloads.append(downloadbook)
else:
ub.delete_download(book.book_id)
# ub.session.query(ub.Downloads).filter(book.book_id == ub.Downloads.book_id).delete()
# ub.session.commit()
if request.method == "POST":
to_save = request.form.to_dict()
if "delete" in to_save:
if (
ub.session.query(ub.User)
.filter(
and_(
ub.User.role.op("&")(ub.ROLE_ADMIN) == ub.ROLE_ADMIN,
ub.User.id != content.id,
)
)
.count()
):
ub.session.query(ub.User).filter(ub.User.id == content.id).delete()
ub.session.commit()
flash(
_("User '%(nick)s' deleted", nick=content.nickname),
category="success",
)
return redirect(url_for("admin"))
else:
flash(
_(
"No admin user remaining, can't delete user",
nick=content.nickname,
),
category="error",
)
return redirect(url_for("admin"))
else:
if "password" in to_save and to_save["password"]:
content.password = generate_password_hash(to_save["password"])
if "admin_role" in to_save and not content.role_admin():
content.role = content.role + ub.ROLE_ADMIN
elif "admin_role" not in to_save and content.role_admin():
content.role = content.role - ub.ROLE_ADMIN
if "download_role" in to_save and not content.role_download():
content.role = content.role + ub.ROLE_DOWNLOAD
elif "download_role" not in to_save and content.role_download():
content.role = content.role - ub.ROLE_DOWNLOAD
if "upload_role" in to_save and not content.role_upload():
content.role = content.role + ub.ROLE_UPLOAD
elif "upload_role" not in to_save and content.role_upload():
content.role = content.role - ub.ROLE_UPLOAD
if "edit_role" in to_save and not content.role_edit():
content.role = content.role + ub.ROLE_EDIT
elif "edit_role" not in to_save and content.role_edit():
content.role = content.role - ub.ROLE_EDIT
if "delete_role" in to_save and not content.role_delete_books():
content.role = content.role + ub.ROLE_DELETE_BOOKS
elif "delete_role" not in to_save and content.role_delete_books():
content.role = content.role - ub.ROLE_DELETE_BOOKS
if "passwd_role" in to_save and not content.role_passwd():
content.role = content.role + ub.ROLE_PASSWD
elif "passwd_role" not in to_save and content.role_passwd():
content.role = content.role - ub.ROLE_PASSWD
if "edit_shelf_role" in to_save and not content.role_edit_shelfs():
content.role = content.role + ub.ROLE_EDIT_SHELFS
elif "edit_shelf_role" not in to_save and content.role_edit_shelfs():
content.role = content.role - ub.ROLE_EDIT_SHELFS
if "show_random" in to_save and not content.show_random_books():
content.sidebar_view += ub.SIDEBAR_RANDOM
elif "show_random" not in to_save and content.show_random_books():
content.sidebar_view -= ub.SIDEBAR_RANDOM
if "show_language" in to_save and not content.show_language():
content.sidebar_view += ub.SIDEBAR_LANGUAGE
elif "show_language" not in to_save and content.show_language():
content.sidebar_view -= ub.SIDEBAR_LANGUAGE
if "show_series" in to_save and not content.show_series():
content.sidebar_view += ub.SIDEBAR_SERIES
elif "show_series" not in to_save and content.show_series():
content.sidebar_view -= ub.SIDEBAR_SERIES
if "show_category" in to_save and not content.show_category():
content.sidebar_view += ub.SIDEBAR_CATEGORY
elif "show_category" not in to_save and content.show_category():
content.sidebar_view -= ub.SIDEBAR_CATEGORY
if "show_recent" in to_save and not content.show_recent():
content.sidebar_view += ub.SIDEBAR_RECENT
elif "show_recent" not in to_save and content.show_recent():
content.sidebar_view -= ub.SIDEBAR_RECENT
if "show_sorted" in to_save and not content.show_sorted():
content.sidebar_view += ub.SIDEBAR_SORTED
elif "show_sorted" not in to_save and content.show_sorted():
content.sidebar_view -= ub.SIDEBAR_SORTED
if "show_publisher" in to_save and not content.show_publisher():
content.sidebar_view += ub.SIDEBAR_PUBLISHER
elif "show_publisher" not in to_save and content.show_publisher():
content.sidebar_view -= ub.SIDEBAR_PUBLISHER
if "show_hot" in to_save and not content.show_hot_books():
content.sidebar_view += ub.SIDEBAR_HOT
elif "show_hot" not in to_save and content.show_hot_books():
content.sidebar_view -= ub.SIDEBAR_HOT
if "show_best_rated" in to_save and not content.show_best_rated_books():
content.sidebar_view += ub.SIDEBAR_BEST_RATED
elif "show_best_rated" not in to_save and content.show_best_rated_books():
content.sidebar_view -= ub.SIDEBAR_BEST_RATED
if "show_read_and_unread" in to_save and not content.show_read_and_unread():
content.sidebar_view += ub.SIDEBAR_READ_AND_UNREAD
elif (
"show_read_and_unread" not in to_save and content.show_read_and_unread()
):
content.sidebar_view -= ub.SIDEBAR_READ_AND_UNREAD
if "show_author" in to_save and not content.show_author():
content.sidebar_view += ub.SIDEBAR_AUTHOR
elif "show_author" not in to_save and content.show_author():
content.sidebar_view -= ub.SIDEBAR_AUTHOR
if "show_detail_random" in to_save and not content.show_detail_random():
content.sidebar_view += ub.DETAIL_RANDOM
elif "show_detail_random" not in to_save and content.show_detail_random():
content.sidebar_view -= ub.DETAIL_RANDOM
content.mature_content = "show_mature_content" in to_save
if "default_language" in to_save:
content.default_language = to_save["default_language"]
if "locale" in to_save and to_save["locale"]:
content.locale = to_save["locale"]
if to_save["email"] and to_save["email"] != content.email:
existing_email = (
ub.session.query(ub.User)
.filter(ub.User.email == to_save["email"].lower())
.first()
)
if not existing_email:
content.email = to_save["email"]
else:
flash(
_("Found an existing account for this e-mail address."),
category="error",
)
return render_title_template(
"user_edit.html",
translations=translations,
languages=languages,
new_user=0,
content=content,
downloads=downloads,
title=_("Edit User %(nick)s", nick=content.nickname),
page="edituser",
)
if (
"kindle_mail" in to_save
and to_save["kindle_mail"] != content.kindle_mail
):
content.kindle_mail = to_save["kindle_mail"]
try:
ub.session.commit()
flash(
_("User '%(nick)s' updated", nick=content.nickname), category="success"
)
except IntegrityError as e:
ub.session.rollback()
print(e)
flash(_("An unknown error occured."), category="error")
return render_title_template(
"user_edit.html",
translations=translations,
languages=languages,
new_user=0,
content=content,
downloads=downloads,
title=_("Edit User %(nick)s", nick=content.nickname),
page="edituser",
)
|
def edit_user(user_id):
content = ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first() # type: ub.User
downloads = list()
languages = speaking_language()
translations = babel.list_translations() + [LC("en")]
for book in content.downloads:
downloadbook = (
db.session.query(db.Books).filter(db.Books.id == book.book_id).first()
)
if downloadbook:
downloads.append(downloadbook)
else:
ub.delete_download(book.book_id)
# ub.session.query(ub.Downloads).filter(book.book_id == ub.Downloads.book_id).delete()
# ub.session.commit()
if request.method == "POST":
to_save = request.form.to_dict()
if "delete" in to_save:
if (
ub.session.query(ub.User)
.filter(
and_(
ub.User.role.op("&")(ub.ROLE_ADMIN) == ub.ROLE_ADMIN,
ub.User.id != content.id,
)
)
.count()
):
ub.session.query(ub.User).filter(ub.User.id == content.id).delete()
ub.session.commit()
flash(
_("User '%(nick)s' deleted", nick=content.nickname),
category="success",
)
return redirect(url_for("admin"))
else:
flash(
_(
"No admin user remaining, can't delete user",
nick=content.nickname,
),
category="error",
)
return redirect(url_for("admin"))
else:
if "password" in to_save and to_save["password"]:
content.password = generate_password_hash(to_save["password"])
if "admin_role" in to_save and not content.role_admin():
content.role = content.role + ub.ROLE_ADMIN
elif "admin_role" not in to_save and content.role_admin():
content.role = content.role - ub.ROLE_ADMIN
if "download_role" in to_save and not content.role_download():
content.role = content.role + ub.ROLE_DOWNLOAD
elif "download_role" not in to_save and content.role_download():
content.role = content.role - ub.ROLE_DOWNLOAD
if "upload_role" in to_save and not content.role_upload():
content.role = content.role + ub.ROLE_UPLOAD
elif "upload_role" not in to_save and content.role_upload():
content.role = content.role - ub.ROLE_UPLOAD
if "edit_role" in to_save and not content.role_edit():
content.role = content.role + ub.ROLE_EDIT
elif "edit_role" not in to_save and content.role_edit():
content.role = content.role - ub.ROLE_EDIT
if "delete_role" in to_save and not content.role_delete_books():
content.role = content.role + ub.ROLE_DELETE_BOOKS
elif "delete_role" not in to_save and content.role_delete_books():
content.role = content.role - ub.ROLE_DELETE_BOOKS
if "passwd_role" in to_save and not content.role_passwd():
content.role = content.role + ub.ROLE_PASSWD
elif "passwd_role" not in to_save and content.role_passwd():
content.role = content.role - ub.ROLE_PASSWD
if "edit_shelf_role" in to_save and not content.role_edit_shelfs():
content.role = content.role + ub.ROLE_EDIT_SHELFS
elif "edit_shelf_role" not in to_save and content.role_edit_shelfs():
content.role = content.role - ub.ROLE_EDIT_SHELFS
if "show_random" in to_save and not content.show_random_books():
content.sidebar_view += ub.SIDEBAR_RANDOM
elif "show_random" not in to_save and content.show_random_books():
content.sidebar_view -= ub.SIDEBAR_RANDOM
if "show_language" in to_save and not content.show_language():
content.sidebar_view += ub.SIDEBAR_LANGUAGE
elif "show_language" not in to_save and content.show_language():
content.sidebar_view -= ub.SIDEBAR_LANGUAGE
if "show_series" in to_save and not content.show_series():
content.sidebar_view += ub.SIDEBAR_SERIES
elif "show_series" not in to_save and content.show_series():
content.sidebar_view -= ub.SIDEBAR_SERIES
if "show_category" in to_save and not content.show_category():
content.sidebar_view += ub.SIDEBAR_CATEGORY
elif "show_category" not in to_save and content.show_category():
content.sidebar_view -= ub.SIDEBAR_CATEGORY
if "show_recent" in to_save and not content.show_recent():
content.sidebar_view += ub.SIDEBAR_RECENT
elif "show_recent" not in to_save and content.show_recent():
content.sidebar_view -= ub.SIDEBAR_RECENT
if "show_sorted" in to_save and not content.show_sorted():
content.sidebar_view += ub.SIDEBAR_SORTED
elif "show_sorted" not in to_save and content.show_sorted():
content.sidebar_view -= ub.SIDEBAR_SORTED
if "show_publisher" in to_save and not content.show_publisher():
content.sidebar_view += ub.SIDEBAR_PUBLISHER
elif "show_publisher" not in to_save and content.show_publisher():
content.sidebar_view -= ub.SIDEBAR_PUBLISHER
if "show_hot" in to_save and not content.show_hot_books():
content.sidebar_view += ub.SIDEBAR_HOT
elif "show_hot" not in to_save and content.show_hot_books():
content.sidebar_view -= ub.SIDEBAR_HOT
if "show_best_rated" in to_save and not content.show_best_rated_books():
content.sidebar_view += ub.SIDEBAR_BEST_RATED
elif "show_best_rated" not in to_save and content.show_best_rated_books():
content.sidebar_view -= ub.SIDEBAR_BEST_RATED
if "show_read_and_unread" in to_save and not content.show_read_and_unread():
content.sidebar_view += ub.SIDEBAR_READ_AND_UNREAD
elif (
"show_read_and_unread" not in to_save and content.show_read_and_unread()
):
content.sidebar_view -= ub.SIDEBAR_READ_AND_UNREAD
if "show_author" in to_save and not content.show_author():
content.sidebar_view += ub.SIDEBAR_AUTHOR
elif "show_author" not in to_save and content.show_author():
content.sidebar_view -= ub.SIDEBAR_AUTHOR
if "show_detail_random" in to_save and not content.show_detail_random():
content.sidebar_view += ub.DETAIL_RANDOM
elif "show_detail_random" not in to_save and content.show_detail_random():
content.sidebar_view -= ub.DETAIL_RANDOM
content.mature_content = "show_mature_content" in to_save
if "default_language" in to_save:
content.default_language = to_save["default_language"]
if "locale" in to_save and to_save["locale"]:
content.locale = to_save["locale"]
if to_save["email"] and to_save["email"] != content.email:
content.email = to_save["email"]
if (
"kindle_mail" in to_save
and to_save["kindle_mail"] != content.kindle_mail
):
content.kindle_mail = to_save["kindle_mail"]
try:
ub.session.commit()
flash(
_("User '%(nick)s' updated", nick=content.nickname), category="success"
)
except IntegrityError:
ub.session.rollback()
flash(_("An unknown error occured."), category="error")
return render_title_template(
"user_edit.html",
translations=translations,
languages=languages,
new_user=0,
content=content,
downloads=downloads,
title=_("Edit User %(nick)s", nick=content.nickname),
page="edituser",
)
|
https://github.com/janeczku/calibre-web/issues/954
|
[2019-06-28 22:37:50,052] ERROR in app: Exception on /tasks [GET]
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/lib/python2.7/site-packages/flask_login/utils.py", line 261, in decorated_view
return func(*args, **kwargs)
File "/calibre-web/app/cps/web.py", line 1646, in get_tasks_status
tasks=helper.global_WorkerThread.get_taskstatus()
File "/calibre-web/app/cps/worker.py", line 225, in get_taskstatus
datetime.now() - self.queue[self.current]['starttime'])
File "/calibre-web/app/cps/worker.py", line 489, in _formatRuntime
if int(v) > 0:
ValueError: invalid literal for int() with base 10: '1 day, 5'
::ffff:172.17.0.1 - - [2019-06-28 22:37:50] "GET /tasks HTTP/1.1" 500 411 0.011915
|
ValueError
|
def get_taskstatus(self):
if self.current < len(self.queue):
if self.UIqueue[self.current]["stat"] == STAT_STARTED:
if self.queue[self.current]["taskType"] == TASK_EMAIL:
self.UIqueue[self.current]["progress"] = self.get_send_status()
self.UIqueue[self.current]["formRuntime"] = (
datetime.now() - self.queue[self.current]["starttime"]
)
self.UIqueue[self.current]["rt"] = (
self.UIqueue[self.current]["formRuntime"].days * 24 * 60
+ self.UIqueue[self.current]["formRuntime"].seconds
+ self.UIqueue[self.current]["formRuntime"].microseconds
)
return self.UIqueue
|
def get_taskstatus(self):
if self.current < len(self.queue):
if self.UIqueue[self.current]["stat"] == STAT_STARTED:
if self.queue[self.current]["taskType"] == TASK_EMAIL:
self.UIqueue[self.current]["progress"] = self.get_send_status()
self.UIqueue[self.current]["runtime"] = self._formatRuntime(
datetime.now() - self.queue[self.current]["starttime"]
)
return self.UIqueue
|
https://github.com/janeczku/calibre-web/issues/954
|
[2019-06-28 22:37:50,052] ERROR in app: Exception on /tasks [GET]
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/lib/python2.7/site-packages/flask_login/utils.py", line 261, in decorated_view
return func(*args, **kwargs)
File "/calibre-web/app/cps/web.py", line 1646, in get_tasks_status
tasks=helper.global_WorkerThread.get_taskstatus()
File "/calibre-web/app/cps/worker.py", line 225, in get_taskstatus
datetime.now() - self.queue[self.current]['starttime'])
File "/calibre-web/app/cps/worker.py", line 489, in _formatRuntime
if int(v) > 0:
ValueError: invalid literal for int() with base 10: '1 day, 5'
::ffff:172.17.0.1 - - [2019-06-28 22:37:50] "GET /tasks HTTP/1.1" 500 411 0.011915
|
ValueError
|
def _convert_ebook_format(self):
error_message = None
file_path = self.queue[self.current]["file_path"]
bookid = self.queue[self.current]["bookid"]
format_old_ext = (
"." + self.queue[self.current]["settings"]["old_book_format"].lower()
)
format_new_ext = (
"." + self.queue[self.current]["settings"]["new_book_format"].lower()
)
# check to see if destination format already exists -
# if it does - mark the conversion task as complete and return a success
# this will allow send to kindle workflow to continue to work
if os.path.isfile(file_path + format_new_ext):
web.app.logger.info(
"Book id %d already converted to %s", bookid, format_new_ext
)
cur_book = (
web.db.session.query(web.db.Books).filter(web.db.Books.id == bookid).first()
)
self.queue[self.current]["path"] = file_path
self.queue[self.current]["title"] = cur_book.title
self._handleSuccess()
return file_path + format_new_ext
else:
web.app.logger.info(
"Book id %d - target format of %s does not exist. Moving forward with convert.",
bookid,
format_new_ext,
)
# check if converter-executable is existing
if not os.path.exists(web.ub.config.config_converterpath):
# ToDo Text is not translated
self._handleError(
"Convertertool %s not found" % web.ub.config.config_converterpath
)
return
try:
# check which converter to use kindlegen is "1"
if format_old_ext == ".epub" and format_new_ext == ".mobi":
if web.ub.config.config_ebookconverter == 1:
if os.name == "nt":
command = (
web.ub.config.config_converterpath + ' "' + file_path + '.epub"'
)
if sys.version_info < (3, 0):
command = command.encode(sys.getfilesystemencoding())
else:
command = [web.ub.config.config_converterpath, file_path + ".epub"]
if sys.version_info < (3, 0):
command = [
x.encode(sys.getfilesystemencoding()) for x in command
]
if web.ub.config.config_ebookconverter == 2:
# Linux py2.7 encode as list without quotes no empty element for parameters
# linux py3.x no encode and as list without quotes no empty element for parameters
# windows py2.7 encode as string with quotes empty element for parameters is okay
# windows py 3.x no encode and as string with quotes empty element for parameters is okay
# separate handling for windows and linux
if os.name == "nt":
command = (
web.ub.config.config_converterpath
+ ' "'
+ file_path
+ format_old_ext
+ '" "'
+ file_path
+ format_new_ext
+ '" '
+ web.ub.config.config_calibre
)
if sys.version_info < (3, 0):
command = command.encode(sys.getfilesystemencoding())
else:
command = [
web.ub.config.config_converterpath,
(file_path + format_old_ext),
(file_path + format_new_ext),
]
if web.ub.config.config_calibre:
parameters = web.ub.config.config_calibre.split(" ")
for param in parameters:
command.append(param)
if sys.version_info < (3, 0):
command = [x.encode(sys.getfilesystemencoding()) for x in command]
p = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
except OSError as e:
self._handleError(_("Ebook-converter failed: %(error)s", error=e))
return
if web.ub.config.config_ebookconverter == 1:
nextline = p.communicate()[0]
# Format of error message (kindlegen translates its output texts):
# Error(prcgen):E23006: Language not recognized in metadata.The dc:Language field is mandatory.Aborting.
conv_error = re.search(".*\(.*\):(E\d+):\s(.*)", nextline, re.MULTILINE)
# If error occoures, store error message for logfile
if conv_error:
error_message = _(
"Kindlegen failed with Error %(error)s. Message: %(message)s",
error=conv_error.group(1),
message=conv_error.group(2).strip(),
)
web.app.logger.debug("convert_kindlegen: " + nextline)
else:
while p.poll() is None:
nextline = p.stdout.readline()
if os.name == "nt" and sys.version_info < (3, 0):
nextline = nextline.decode("windows-1252")
web.app.logger.debug(nextline.strip("\r\n"))
# parse progress string from calibre-converter
progress = re.search("(\d+)%\s.*", nextline)
if progress:
self.UIqueue[self.current]["progress"] = progress.group(1) + " %"
# process returncode
check = p.returncode
calibre_traceback = p.stderr.readlines()
for ele in calibre_traceback:
web.app.logger.debug(ele.strip("\n"))
if not ele.startswith("Traceback") and not ele.startswith(" File"):
error_message = "Calibre failed with error: %s" % ele.strip("\n")
# kindlegen returncodes
# 0 = Info(prcgen):I1036: Mobi file built successfully
# 1 = Info(prcgen):I1037: Mobi file built with WARNINGS!
# 2 = Info(prcgen):I1038: MOBI file could not be generated because of errors!
if (check < 2 and web.ub.config.config_ebookconverter == 1) or (
check == 0 and web.ub.config.config_ebookconverter == 2
):
cur_book = (
web.db.session.query(web.db.Books).filter(web.db.Books.id == bookid).first()
)
if os.path.isfile(file_path + format_new_ext):
new_format = web.db.Data(
name=cur_book.data[0].name,
book_format=self.queue[self.current]["settings"][
"new_book_format"
].upper(),
book=bookid,
uncompressed_size=os.path.getsize(file_path + format_new_ext),
)
cur_book.data.append(new_format)
web.db.session.commit()
self.queue[self.current]["path"] = cur_book.path
self.queue[self.current]["title"] = cur_book.title
if web.ub.config.config_use_google_drive:
os.remove(file_path + format_old_ext)
self._handleSuccess()
return file_path + format_new_ext
else:
error_message = format_new_ext.upper() + " format not found on disk"
web.app.logger.info("ebook converter failed with error while converting book")
if not error_message:
error_message = "Ebook converter failed with unknown error"
self._handleError(error_message)
return
|
def _convert_ebook_format(self):
error_message = None
file_path = self.queue[self.current]["file_path"]
bookid = self.queue[self.current]["bookid"]
format_old_ext = (
"." + self.queue[self.current]["settings"]["old_book_format"].lower()
)
format_new_ext = (
"." + self.queue[self.current]["settings"]["new_book_format"].lower()
)
# check to see if destination format already exists -
# if it does - mark the conversion task as complete and return a success
# this will allow send to kindle workflow to continue to work
if os.path.isfile(file_path + format_new_ext):
web.app.logger.info(
"Book id %d already converted to %s", bookid, format_new_ext
)
cur_book = (
web.db.session.query(web.db.Books).filter(web.db.Books.id == bookid).first()
)
self.queue[self.current]["path"] = file_path
self.queue[self.current]["title"] = cur_book.title
self._handleSuccess()
return file_path + format_new_ext
else:
web.app.logger.info(
"Book id %d - target format of %s does not exist. Moving forward with convert.",
bookid,
format_new_ext,
)
# check if converter-executable is existing
if not os.path.exists(web.ub.config.config_converterpath):
# ToDo Text is not translated
self._handleError(
"Convertertool %s not found" % web.ub.config.config_converterpath
)
return
try:
# check which converter to use kindlegen is "1"
if format_old_ext == ".epub" and format_new_ext == ".mobi":
if web.ub.config.config_ebookconverter == 1:
if os.name == "nt":
command = (
web.ub.config.config_converterpath + ' "' + file_path + '.epub"'
)
if sys.version_info < (3, 0):
command = command.encode(sys.getfilesystemencoding())
else:
command = [web.ub.config.config_converterpath, file_path + ".epub"]
if sys.version_info < (3, 0):
command = [
x.encode(sys.getfilesystemencoding()) for x in command
]
if web.ub.config.config_ebookconverter == 2:
# Linux py2.7 encode as list without quotes no empty element for parameters
# linux py3.x no encode and as list without quotes no empty element for parameters
# windows py2.7 encode as string with quotes empty element for parameters is okay
# windows py 3.x no encode and as string with quotes empty element for parameters is okay
# separate handling for windows and linux
if os.name == "nt":
command = (
web.ub.config.config_converterpath
+ ' "'
+ file_path
+ format_old_ext
+ '" "'
+ file_path
+ format_new_ext
+ '" '
+ web.ub.config.config_calibre
)
if sys.version_info < (3, 0):
command = command.encode(sys.getfilesystemencoding())
else:
command = [
web.ub.config.config_converterpath,
(file_path + format_old_ext),
(file_path + format_new_ext),
]
if web.ub.config.config_calibre:
parameters = web.ub.config.config_calibre.split(" ")
for param in parameters:
command.append(param)
if sys.version_info < (3, 0):
command = [x.encode(sys.getfilesystemencoding()) for x in command]
p = subprocess.Popen(command, stdout=subprocess.PIPE, universal_newlines=True)
except OSError as e:
self._handleError(_("Ebook-converter failed: %(error)s", error=e))
return
if web.ub.config.config_ebookconverter == 1:
nextline = p.communicate()[0]
# Format of error message (kindlegen translates its output texts):
# Error(prcgen):E23006: Language not recognized in metadata.The dc:Language field is mandatory.Aborting.
conv_error = re.search(".*\(.*\):(E\d+):\s(.*)", nextline, re.MULTILINE)
# If error occoures, store error message for logfile
if conv_error:
error_message = _(
"Kindlegen failed with Error %(error)s. Message: %(message)s",
error=conv_error.group(1),
message=conv_error.group(2).strip(),
)
web.app.logger.debug("convert_kindlegen: " + nextline)
else:
while p.poll() is None:
nextline = p.stdout.readline()
if os.name == "nt" and sys.version_info < (3, 0):
nextline = nextline.decode("windows-1252")
web.app.logger.debug(nextline.strip("\r\n"))
# parse progress string from calibre-converter
progress = re.search("(\d+)%\s.*", nextline)
if progress:
self.UIqueue[self.current]["progress"] = progress.group(1) + " %"
# process returncode
check = p.returncode
# kindlegen returncodes
# 0 = Info(prcgen):I1036: Mobi file built successfully
# 1 = Info(prcgen):I1037: Mobi file built with WARNINGS!
# 2 = Info(prcgen):I1038: MOBI file could not be generated because of errors!
if (check < 2 and web.ub.config.config_ebookconverter == 1) or (
check == 0 and web.ub.config.config_ebookconverter == 2
):
cur_book = (
web.db.session.query(web.db.Books).filter(web.db.Books.id == bookid).first()
)
if os.path.isfile(file_path + format_new_ext):
new_format = web.db.Data(
name=cur_book.data[0].name,
book_format=self.queue[self.current]["settings"][
"new_book_format"
].upper(),
book=bookid,
uncompressed_size=os.path.getsize(file_path + format_new_ext),
)
cur_book.data.append(new_format)
web.db.session.commit()
self.queue[self.current]["path"] = cur_book.path
self.queue[self.current]["title"] = cur_book.title
if web.ub.config.config_use_google_drive:
os.remove(file_path + format_old_ext)
self._handleSuccess()
return file_path + format_new_ext
else:
error_message = format_new_ext.upper() + " format not found on disk"
web.app.logger.info("ebook converter failed with error while converting book")
if not error_message:
error_message = "Ebook converter failed with unknown error"
self._handleError(error_message)
return
|
https://github.com/janeczku/calibre-web/issues/954
|
[2019-06-28 22:37:50,052] ERROR in app: Exception on /tasks [GET]
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/lib/python2.7/site-packages/flask_login/utils.py", line 261, in decorated_view
return func(*args, **kwargs)
File "/calibre-web/app/cps/web.py", line 1646, in get_tasks_status
tasks=helper.global_WorkerThread.get_taskstatus()
File "/calibre-web/app/cps/worker.py", line 225, in get_taskstatus
datetime.now() - self.queue[self.current]['starttime'])
File "/calibre-web/app/cps/worker.py", line 489, in _formatRuntime
if int(v) > 0:
ValueError: invalid literal for int() with base 10: '1 day, 5'
::ffff:172.17.0.1 - - [2019-06-28 22:37:50] "GET /tasks HTTP/1.1" 500 411 0.011915
|
ValueError
|
def _handleError(self, error_message):
web.app.logger.error(error_message)
self.UIqueue[self.current]["stat"] = STAT_FAIL
self.UIqueue[self.current]["progress"] = "100 %"
self.UIqueue[self.current]["formRuntime"] = (
datetime.now() - self.queue[self.current]["starttime"]
)
self.UIqueue[self.current]["message"] = error_message
|
def _handleError(self, error_message):
web.app.logger.error(error_message)
self.UIqueue[self.current]["stat"] = STAT_FAIL
self.UIqueue[self.current]["progress"] = "100 %"
self.UIqueue[self.current]["runtime"] = self._formatRuntime(
datetime.now() - self.queue[self.current]["starttime"]
)
self.UIqueue[self.current]["message"] = error_message
|
https://github.com/janeczku/calibre-web/issues/954
|
[2019-06-28 22:37:50,052] ERROR in app: Exception on /tasks [GET]
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/lib/python2.7/site-packages/flask_login/utils.py", line 261, in decorated_view
return func(*args, **kwargs)
File "/calibre-web/app/cps/web.py", line 1646, in get_tasks_status
tasks=helper.global_WorkerThread.get_taskstatus()
File "/calibre-web/app/cps/worker.py", line 225, in get_taskstatus
datetime.now() - self.queue[self.current]['starttime'])
File "/calibre-web/app/cps/worker.py", line 489, in _formatRuntime
if int(v) > 0:
ValueError: invalid literal for int() with base 10: '1 day, 5'
::ffff:172.17.0.1 - - [2019-06-28 22:37:50] "GET /tasks HTTP/1.1" 500 411 0.011915
|
ValueError
|
def _handleSuccess(self):
self.UIqueue[self.current]["stat"] = STAT_FINISH_SUCCESS
self.UIqueue[self.current]["progress"] = "100 %"
self.UIqueue[self.current]["formRuntime"] = (
datetime.now() - self.queue[self.current]["starttime"]
)
|
def _handleSuccess(self):
self.UIqueue[self.current]["stat"] = STAT_FINISH_SUCCESS
self.UIqueue[self.current]["progress"] = "100 %"
self.UIqueue[self.current]["runtime"] = self._formatRuntime(
datetime.now() - self.queue[self.current]["starttime"]
)
|
https://github.com/janeczku/calibre-web/issues/954
|
[2019-06-28 22:37:50,052] ERROR in app: Exception on /tasks [GET]
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/lib/python2.7/site-packages/flask_login/utils.py", line 261, in decorated_view
return func(*args, **kwargs)
File "/calibre-web/app/cps/web.py", line 1646, in get_tasks_status
tasks=helper.global_WorkerThread.get_taskstatus()
File "/calibre-web/app/cps/worker.py", line 225, in get_taskstatus
datetime.now() - self.queue[self.current]['starttime'])
File "/calibre-web/app/cps/worker.py", line 489, in _formatRuntime
if int(v) > 0:
ValueError: invalid literal for int() with base 10: '1 day, 5'
::ffff:172.17.0.1 - - [2019-06-28 22:37:50] "GET /tasks HTTP/1.1" 500 411 0.011915
|
ValueError
|
def pdf_preview(tmp_file_path, tmp_dir):
if use_generic_pdf_cover:
return None
else:
if use_PIL:
try:
input1 = PdfFileReader(open(tmp_file_path, "rb"), strict=False)
page0 = input1.getPage(0)
xObject = page0["/Resources"]["/XObject"].getObject()
for obj in xObject:
if xObject[obj]["/Subtype"] == "/Image":
size = (xObject[obj]["/Width"], xObject[obj]["/Height"])
data = xObject[obj]._data # xObject[obj].getData()
if xObject[obj]["/ColorSpace"] == "/DeviceRGB":
mode = "RGB"
else:
mode = "P"
if "/Filter" in xObject[obj]:
if xObject[obj]["/Filter"] == "/FlateDecode":
img = Image.frombytes(mode, size, data)
cover_file_name = (
os.path.splitext(tmp_file_path)[0] + ".cover.png"
)
img.save(
filename=os.path.join(tmp_dir, cover_file_name)
)
return cover_file_name
# img.save(obj[1:] + ".png")
elif xObject[obj]["/Filter"] == "/DCTDecode":
cover_file_name = (
os.path.splitext(tmp_file_path)[0] + ".cover.jpg"
)
img = open(cover_file_name, "wb")
img.write(data)
img.close()
return cover_file_name
elif xObject[obj]["/Filter"] == "/JPXDecode":
cover_file_name = (
os.path.splitext(tmp_file_path)[0] + ".cover.jp2"
)
img = open(cover_file_name, "wb")
img.write(data)
img.close()
return cover_file_name
else:
img = Image.frombytes(mode, size, data)
cover_file_name = (
os.path.splitext(tmp_file_path)[0] + ".cover.png"
)
img.save(filename=os.path.join(tmp_dir, cover_file_name))
return cover_file_name
# img.save(obj[1:] + ".png")
except Exception as ex:
print(ex)
try:
cover_file_name = os.path.splitext(tmp_file_path)[0] + ".cover.jpg"
with Image(filename=tmp_file_path + "[0]", resolution=150) as img:
img.compression_quality = 88
img.save(filename=os.path.join(tmp_dir, cover_file_name))
return cover_file_name
except PolicyError as ex:
logger.warning("Pdf extraction forbidden by Imagemagick policy: %s", ex)
return None
except Exception as ex:
logger.warning("Cannot extract cover image, using default: %s", ex)
return None
|
def pdf_preview(tmp_file_path, tmp_dir):
if use_generic_pdf_cover:
return None
else:
try:
input1 = PdfFileReader(open(tmp_file_path, "rb"), strict=False)
page0 = input1.getPage(0)
xObject = page0["/Resources"]["/XObject"].getObject()
for obj in xObject:
if xObject[obj]["/Subtype"] == "/Image":
size = (xObject[obj]["/Width"], xObject[obj]["/Height"])
data = xObject[obj]._data # xObject[obj].getData()
if xObject[obj]["/ColorSpace"] == "/DeviceRGB":
mode = "RGB"
else:
mode = "P"
if "/Filter" in xObject[obj]:
if xObject[obj]["/Filter"] == "/FlateDecode":
img = Image.frombytes(mode, size, data)
cover_file_name = (
os.path.splitext(tmp_file_path)[0] + ".cover.png"
)
img.save(filename=os.path.join(tmp_dir, cover_file_name))
return cover_file_name
# img.save(obj[1:] + ".png")
elif xObject[obj]["/Filter"] == "/DCTDecode":
cover_file_name = (
os.path.splitext(tmp_file_path)[0] + ".cover.jpg"
)
img = open(cover_file_name, "wb")
img.write(data)
img.close()
return cover_file_name
elif xObject[obj]["/Filter"] == "/JPXDecode":
cover_file_name = (
os.path.splitext(tmp_file_path)[0] + ".cover.jp2"
)
img = open(cover_file_name, "wb")
img.write(data)
img.close()
return cover_file_name
else:
img = Image.frombytes(mode, size, data)
cover_file_name = (
os.path.splitext(tmp_file_path)[0] + ".cover.png"
)
img.save(filename=os.path.join(tmp_dir, cover_file_name))
return cover_file_name
# img.save(obj[1:] + ".png")
except Exception as ex:
print(ex)
try:
cover_file_name = os.path.splitext(tmp_file_path)[0] + ".cover.jpg"
with Image(filename=tmp_file_path + "[0]", resolution=150) as img:
img.compression_quality = 88
img.save(filename=os.path.join(tmp_dir, cover_file_name))
return cover_file_name
except PolicyError as ex:
logger.warning("Pdf extraction forbidden by Imagemagick policy: %s", ex)
return None
except Exception as ex:
logger.warning("Cannot extract cover image, using default: %s", ex)
return None
|
https://github.com/janeczku/calibre-web/issues/885
|
c:\CalibreWeb>python37 cps.py
Traceback (most recent call last):
File "cps.py", line 13, in <module>
from cps.server import Server
File "c:\CalibreWeb\cps\server.py", line 25, in <module>
import web
File "c:\CalibreWeb\cps\web.py", line 38, in <module>
import helper
File "c:\CalibreWeb\cps\helper.py", line 38, in <module>
from PIL import Image
ModuleNotFoundError: No module named 'PIL'
|
ModuleNotFoundError
|
def save_cover(img, book_path):
content_type = img.headers.get("content-type")
if content_type not in ("image/jpeg", "image/png", "image/webp"):
web.app.logger.error("Only jpg/jpeg/png/webp files are supported as coverfile")
return False
if use_PIL:
# convert to jpg because calibre only supports jpg
if content_type in ("image/png", "image/webp"):
if hasattr(img, "stream"):
imgc = Image.open(img.stream)
else:
imgc = Image.open(io.BytesIO(img.content))
im = imgc.convert("RGB")
tmp_bytesio = io.BytesIO()
im.save(tmp_bytesio, format="JPEG")
img._content = tmp_bytesio.getvalue()
if ub.config.config_use_google_drive:
tmpDir = gettempdir()
if save_cover_from_filestorage(tmpDir, "uploaded_cover.jpg", img) is True:
gd.uploadFileToEbooksFolder(
os.path.join(book_path, "cover.jpg"),
os.path.join(tmpDir, "uploaded_cover.jpg"),
)
web.app.logger.info("Cover is saved on Google Drive")
return True
else:
return False
else:
return save_cover_from_filestorage(
os.path.join(ub.config.config_calibre_dir, book_path), "cover.jpg", img
)
|
def save_cover(img, book_path):
content_type = img.headers.get("content-type")
if content_type not in ("image/jpeg", "image/png", "image/webp"):
web.app.logger.error("Only jpg/jpeg/png/webp files are supported as coverfile")
return False
# convert to jpg because calibre only supports jpg
if content_type in ("image/png", "image/webp"):
if hasattr(img, "stream"):
imgc = Image.open(img.stream)
else:
imgc = Image.open(io.BytesIO(img.content))
im = imgc.convert("RGB")
tmp_bytesio = io.BytesIO()
im.save(tmp_bytesio, format="JPEG")
img._content = tmp_bytesio.getvalue()
if ub.config.config_use_google_drive:
tmpDir = gettempdir()
if save_cover_from_filestorage(tmpDir, "uploaded_cover.jpg", img) is True:
gd.uploadFileToEbooksFolder(
os.path.join(book_path, "cover.jpg"),
os.path.join(tmpDir, "uploaded_cover.jpg"),
)
web.app.logger.info("Cover is saved on Google Drive")
return True
else:
return False
else:
return save_cover_from_filestorage(
os.path.join(ub.config.config_calibre_dir, book_path), "cover.jpg", img
)
|
https://github.com/janeczku/calibre-web/issues/885
|
c:\CalibreWeb>python37 cps.py
Traceback (most recent call last):
File "cps.py", line 13, in <module>
from cps.server import Server
File "c:\CalibreWeb\cps\server.py", line 25, in <module>
import web
File "c:\CalibreWeb\cps\web.py", line 38, in <module>
import helper
File "c:\CalibreWeb\cps\helper.py", line 38, in <module>
from PIL import Image
ModuleNotFoundError: No module named 'PIL'
|
ModuleNotFoundError
|
def convert_kindlegen(self):
error_message = None
file_path = self.queue[self.current]["file_path"]
bookid = self.queue[self.current]["bookid"]
if not os.path.exists(web.ub.config.config_converterpath):
error_message = _(
"kindlegen binary %(kindlepath)s not found",
kindlepath=web.ub.config.config_converterpath,
)
web.app.logger.error("convert_kindlegen: " + error_message)
self.queue[self.current]["status"] = STAT_FAIL
self.UIqueue[self.current]["status"] = _("Failed")
self.UIqueue[self.current]["progress"] = "100 %"
self.UIqueue[self.current]["runtime"] = self._formatRuntime(
datetime.now() - self.queue[self.current]["starttime"]
)
self.UIqueue[self.current]["message"] = error_message
return
try:
command = (
web.ub.config.config_converterpath + ' "' + file_path + '.epub"'
).encode(sys.getfilesystemencoding())
if sys.version_info > (3, 0):
p = subprocess.Popen(
command.decode("Utf-8"),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
)
else:
p = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
)
except Exception:
error_message = _("kindlegen failed, no execution permissions")
web.app.logger.error("convert_kindlegen: " + error_message)
self.queue[self.current]["status"] = STAT_FAIL
self.UIqueue[self.current]["status"] = _("Failed")
self.UIqueue[self.current]["progress"] = "100 %"
self.UIqueue[self.current]["runtime"] = self._formatRuntime(
datetime.now() - self.queue[self.current]["starttime"]
)
self.UIqueue[self.current]["message"] = error_message
return
# Poll process for new output until finished
while True:
nextline = p.stdout.readline()
if sys.version_info > (3, 0):
nextline = nextline.decode("Utf-8", "backslashreplace")
if nextline == "" and p.poll() is not None:
break
if nextline != "\r\n":
# Format of error message (kindlegen translates its output texts):
# Error(prcgen):E23006: Language not recognized in metadata.The dc:Language field is mandatory.Aborting.
conv_error = re.search(".*\(.*\):(E\d+):\s(.*)", nextline)
# If error occoures, log in every case
if conv_error:
if sys.version_info > (3, 0):
error_message = _(
"Kindlegen failed with Error %(error)s. Message: %(message)s",
error=conv_error.group(1),
message=conv_error.group(2),
)
else:
error_message = _(
"Kindlegen failed with Error %(error)s. Message: %(message)s",
error=conv_error.group(1),
message=conv_error.group(2).decode("utf-8"),
)
web.app.logger.info("convert_kindlegen: " + error_message)
web.app.logger.info(nextline.strip("\r\n"))
else:
web.app.logger.debug(nextline.strip("\r\n"))
check = p.returncode
if not check or check < 2:
cur_book = (
web.db.session.query(web.db.Books).filter(web.db.Books.id == bookid).first()
)
new_format = web.db.Data(
name=cur_book.data[0].name,
book_format="MOBI",
book=bookid,
uncompressed_size=os.path.getsize(file_path + ".mobi"),
)
cur_book.data.append(new_format)
web.db.session.commit()
self.queue[self.current]["path"] = cur_book.path
self.queue[self.current]["title"] = cur_book.title
if web.ub.config.config_use_google_drive:
os.remove(file_path + ".epub")
self.queue[self.current]["status"] = STAT_FINISH_SUCCESS
self.UIqueue[self.current]["status"] = _("Finished")
self.UIqueue[self.current]["progress"] = "100 %"
self.UIqueue[self.current]["runtime"] = self._formatRuntime(
datetime.now() - self.queue[self.current]["starttime"]
)
return file_path + ".mobi" # , RET_SUCCESS
else:
web.app.logger.info(
"convert_kindlegen: kindlegen failed with error while converting book"
)
if not error_message:
error_message = "kindlegen failed, no excecution permissions"
self.queue[self.current]["status"] = STAT_FAIL
self.UIqueue[self.current]["status"] = _("Failed")
self.UIqueue[self.current]["progress"] = "100 %"
self.UIqueue[self.current]["runtime"] = self._formatRuntime(
datetime.now() - self.queue[self.current]["starttime"]
)
self.UIqueue[self.current]["message"] = error_message
return # error_message, RET_FAIL
|
def convert_kindlegen(self):
error_message = None
file_path = self.queue[self.current]["file_path"]
bookid = self.queue[self.current]["bookid"]
if not os.path.exists(web.ub.config.config_converterpath):
error_message = _(
"kindlegen binary %(kindlepath)s not found",
kindlepath=web.ub.config.config_converterpath,
)
web.app.logger.error("convert_kindlegen: " + error_message)
self.queue[self.current]["status"] = STAT_FAIL
self.UIqueue[self.current]["status"] = _("Failed")
self.UIqueue[self.current]["progress"] = "100 %"
self.UIqueue[self.current]["runtime"] = self._formatRuntime(
datetime.now() - self.queue[self.current]["starttime"]
)
self.UIqueue[self.current]["message"] = error_message
return
try:
p = subprocess.Popen(
(web.ub.config.config_converterpath + ' "' + file_path + '.epub"').encode(
sys.getfilesystemencoding()
),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
)
except Exception:
error_message = _("kindlegen failed, no execution permissions")
web.app.logger.error("convert_kindlegen: " + error_message)
self.queue[self.current]["status"] = STAT_FAIL
self.UIqueue[self.current]["status"] = _("Failed")
self.UIqueue[self.current]["progress"] = "100 %"
self.UIqueue[self.current]["runtime"] = self._formatRuntime(
datetime.now() - self.queue[self.current]["starttime"]
)
self.UIqueue[self.current]["message"] = error_message
return
# Poll process for new output until finished
while True:
nextline = p.stdout.readline()
if nextline == "" and p.poll() is not None:
break
if nextline != "\r\n":
# Format of error message (kindlegen translates its output texts):
# Error(prcgen):E23006: Language not recognized in metadata.The dc:Language field is mandatory.Aborting.
conv_error = re.search(".*\(.*\):(E\d+):\s(.*)", nextline)
# If error occoures, log in every case
if conv_error:
error_message = _(
"Kindlegen failed with Error %(error)s. Message: %(message)s",
error=conv_error.group(1),
message=conv_error.group(2).decode("utf-8"),
)
web.app.logger.info("convert_kindlegen: " + error_message)
web.app.logger.info(nextline.strip("\r\n"))
else:
web.app.logger.debug(nextline.strip("\r\n"))
check = p.returncode
if not check or check < 2:
cur_book = (
web.db.session.query(web.db.Books).filter(web.db.Books.id == bookid).first()
)
new_format = web.db.Data(
name=cur_book.data[0].name,
book_format="MOBI",
book=bookid,
uncompressed_size=os.path.getsize(file_path + ".mobi"),
)
cur_book.data.append(new_format)
web.db.session.commit()
self.queue[self.current]["path"] = cur_book.path
self.queue[self.current]["title"] = cur_book.title
if web.ub.config.config_use_google_drive:
os.remove(file_path + ".epub")
self.queue[self.current]["status"] = STAT_FINISH_SUCCESS
self.UIqueue[self.current]["status"] = _("Finished")
self.UIqueue[self.current]["progress"] = "100 %"
self.UIqueue[self.current]["runtime"] = self._formatRuntime(
datetime.now() - self.queue[self.current]["starttime"]
)
return file_path + ".mobi" # , RET_SUCCESS
else:
web.app.logger.info(
"convert_kindlegen: kindlegen failed with error while converting book"
)
if not error_message:
error_message = "kindlegen failed, no excecution permissions"
self.queue[self.current]["status"] = STAT_FAIL
self.UIqueue[self.current]["status"] = _("Failed")
self.UIqueue[self.current]["progress"] = "100 %"
self.UIqueue[self.current]["runtime"] = self._formatRuntime(
datetime.now() - self.queue[self.current]["starttime"]
)
self.UIqueue[self.current]["message"] = error_message
return # error_message, RET_FAIL
|
https://github.com/janeczku/calibre-web/issues/574
|
Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 801, in __bootstrap_inner
self.run()
File "/opt/calibre-web/cps/worker.py", line 167, in run
self.send_raw_email()
File "/opt/calibre-web/cps/worker.py", line 444, in send_raw_email
self.asyncSMTP = email_SSL(obj['settings']["mail_server"], obj['settings']["mail_port"], timeout)
File "/opt/calibre-web/cps/worker.py", line 142, in __init__
smtplib.SMTP_SSL.__init__(self, *args, **kwargs)
TypeError: unbound method __init__() must be called with SMTP_SSL instance as first argument (got email_SSL instance instead)
|
TypeError
|
def convert_calibre(self):
error_message = None
file_path = self.queue[self.current]["file_path"]
bookid = self.queue[self.current]["bookid"]
if not os.path.exists(web.ub.config.config_converterpath):
error_message = _(
"Ebook-convert binary %(converterpath)s not found",
converterpath=web.ub.config.config_converterpath,
)
web.app.logger.error("convert_calibre: " + error_message)
self.queue[self.current]["status"] = STAT_FAIL
self.UIqueue[self.current]["status"] = _("Failed")
self.UIqueue[self.current]["progress"] = "100 %"
self.UIqueue[self.current]["runtime"] = self._formatRuntime(
datetime.now() - self.queue[self.current]["starttime"]
)
self.UIqueue[self.current]["message"] = error_message
return
try:
command = (
'"'
+ web.ub.config.config_converterpath
+ '" "'
+ file_path
+ '.epub" "'
+ file_path
+ '.mobi" '
+ web.ub.config.config_calibre
).encode(sys.getfilesystemencoding())
if sys.version_info > (3, 0):
p = subprocess.Popen(
command.decode("Utf-8"),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
)
else:
p = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
)
except Exception as e:
error_message = _("Ebook-convert failed, no execution permissions")
web.app.logger.error("convert_calibre: " + error_message)
self.queue[self.current]["status"] = STAT_FAIL
self.UIqueue[self.current]["status"] = _("Failed")
self.UIqueue[self.current]["progress"] = "100 %"
self.UIqueue[self.current]["runtime"] = self._formatRuntime(
datetime.now() - self.queue[self.current]["starttime"]
)
self.UIqueue[self.current]["message"] = error_message
return # error_message, RET_FAIL
# Poll process for new output until finished
while True:
nextline = p.stdout.readline()
if sys.version_info > (3, 0):
nextline = nextline.decode("Utf-8", "backslashreplace")
if nextline == "" and p.poll() is not None:
break
progress = re.search("(\d+)%\s.*", nextline)
if progress:
self.UIqueue[self.current]["progress"] = progress.group(1) + "%"
if sys.version_info > (3, 0):
web.app.logger.debug(nextline.strip("\r\n"))
else:
web.app.logger.debug(
nextline.strip("\r\n").decode(sys.getfilesystemencoding())
)
check = p.returncode
if check == 0:
cur_book = (
web.db.session.query(web.db.Books).filter(web.db.Books.id == bookid).first()
)
new_format = web.db.Data(
name=cur_book.data[0].name,
book_format="MOBI",
book=bookid,
uncompressed_size=os.path.getsize(file_path + ".mobi"),
)
cur_book.data.append(new_format)
web.db.session.commit()
self.queue[self.current]["path"] = cur_book.path
self.queue[self.current]["title"] = cur_book.title
if web.ub.config.config_use_google_drive:
os.remove(file_path + ".epub")
self.queue[self.current]["status"] = STAT_FINISH_SUCCESS
self.UIqueue[self.current]["status"] = _("Finished")
self.UIqueue[self.current]["progress"] = "100 %"
self.UIqueue[self.current]["runtime"] = self._formatRuntime(
datetime.now() - self.queue[self.current]["starttime"]
)
return file_path + ".mobi" # , RET_SUCCESS
else:
web.app.logger.info(
"convert_calibre: Ebook-convert failed with error while converting book"
)
if not error_message:
error_message = "Ebook-convert failed, no excecution permissions"
self.queue[self.current]["status"] = STAT_FAIL
self.UIqueue[self.current]["status"] = _("Failed")
self.UIqueue[self.current]["progress"] = "100 %"
self.UIqueue[self.current]["runtime"] = self._formatRuntime(
datetime.now() - self.queue[self.current]["starttime"]
)
self.UIqueue[self.current]["message"] = error_message
return # error_message, RET_FAIL
|
def convert_calibre(self):
error_message = None
file_path = self.queue[self.current]["file_path"]
bookid = self.queue[self.current]["bookid"]
if not os.path.exists(web.ub.config.config_converterpath):
error_message = _(
"Ebook-convert binary %(converterpath)s not found",
converterpath=web.ub.config.config_converterpath,
)
web.app.logger.error("convert_calibre: " + error_message)
self.queue[self.current]["status"] = STAT_FAIL
self.UIqueue[self.current]["status"] = _("Failed")
self.UIqueue[self.current]["progress"] = "100 %"
self.UIqueue[self.current]["runtime"] = self._formatRuntime(
datetime.now() - self.queue[self.current]["starttime"]
)
self.UIqueue[self.current]["message"] = error_message
return
try:
command = (
'"'
+ web.ub.config.config_converterpath
+ '" "'
+ file_path
+ '.epub" "'
+ file_path
+ '.mobi" '
+ web.ub.config.config_calibre
).encode(sys.getfilesystemencoding())
p = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
)
except Exception:
error_message = _("Ebook-convert failed, no execution permissions")
web.app.logger.error("convert_calibre: " + error_message)
self.queue[self.current]["status"] = STAT_FAIL
self.UIqueue[self.current]["status"] = _("Failed")
self.UIqueue[self.current]["progress"] = "100 %"
self.UIqueue[self.current]["runtime"] = self._formatRuntime(
datetime.now() - self.queue[self.current]["starttime"]
)
self.UIqueue[self.current]["message"] = error_message
return # error_message, RET_FAIL
# Poll process for new output until finished
while True:
nextline = p.stdout.readline()
if nextline == "" and p.poll() is not None:
break
progress = re.search("(\d+)%\s.*", nextline)
if progress:
self.UIqueue[self.current]["progress"] = progress.group(1) + "%"
web.app.logger.debug(nextline.strip("\r\n").decode(sys.getfilesystemencoding()))
check = p.returncode
if check == 0:
cur_book = (
web.db.session.query(web.db.Books).filter(web.db.Books.id == bookid).first()
)
new_format = web.db.Data(
name=cur_book.data[0].name,
book_format="MOBI",
book=bookid,
uncompressed_size=os.path.getsize(file_path + ".mobi"),
)
cur_book.data.append(new_format)
web.db.session.commit()
self.queue[self.current]["path"] = cur_book.path
self.queue[self.current]["title"] = cur_book.title
if web.ub.config.config_use_google_drive:
os.remove(file_path + ".epub")
self.queue[self.current]["status"] = STAT_FINISH_SUCCESS
self.UIqueue[self.current]["status"] = _("Finished")
self.UIqueue[self.current]["progress"] = "100 %"
self.UIqueue[self.current]["runtime"] = self._formatRuntime(
datetime.now() - self.queue[self.current]["starttime"]
)
return file_path + ".mobi" # , RET_SUCCESS
else:
web.app.logger.info(
"convert_calibre: Ebook-convert failed with error while converting book"
)
if not error_message:
error_message = "Ebook-convert failed, no excecution permissions"
self.queue[self.current]["status"] = STAT_FAIL
self.UIqueue[self.current]["status"] = _("Failed")
self.UIqueue[self.current]["progress"] = "100 %"
self.UIqueue[self.current]["runtime"] = self._formatRuntime(
datetime.now() - self.queue[self.current]["starttime"]
)
self.UIqueue[self.current]["message"] = error_message
return # error_message, RET_FAIL
|
https://github.com/janeczku/calibre-web/issues/574
|
Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 801, in __bootstrap_inner
self.run()
File "/opt/calibre-web/cps/worker.py", line 167, in run
self.send_raw_email()
File "/opt/calibre-web/cps/worker.py", line 444, in send_raw_email
self.asyncSMTP = email_SSL(obj['settings']["mail_server"], obj['settings']["mail_port"], timeout)
File "/opt/calibre-web/cps/worker.py", line 142, in __init__
smtplib.SMTP_SSL.__init__(self, *args, **kwargs)
TypeError: unbound method __init__() must be called with SMTP_SSL instance as first argument (got email_SSL instance instead)
|
TypeError
|
def pdf_meta(tmp_file_path, original_file_name, original_file_extension):
if use_pdf_meta:
pdf = PdfFileReader(open(tmp_file_path, "rb"), strict=False)
doc_info = pdf.getDocumentInfo()
else:
doc_info = None
if doc_info is not None:
author = doc_info.author if doc_info.author else "Unknown"
title = doc_info.title if doc_info.title else original_file_name
subject = doc_info.subject
else:
author = "Unknown"
title = original_file_name
subject = ""
return uploader.BookMeta(
file_path=tmp_file_path,
extension=original_file_extension,
title=title,
author=author,
cover=pdf_preview(tmp_file_path, original_file_name),
description=subject,
tags="",
series="",
series_id="",
languages="",
)
|
def pdf_meta(tmp_file_path, original_file_name, original_file_extension):
if use_pdf_meta:
pdf = PdfFileReader(open(tmp_file_path, "rb"))
doc_info = pdf.getDocumentInfo()
else:
doc_info = None
if doc_info is not None:
author = doc_info.author if doc_info.author else "Unknown"
title = doc_info.title if doc_info.title else original_file_name
subject = doc_info.subject
else:
author = "Unknown"
title = original_file_name
subject = ""
return uploader.BookMeta(
file_path=tmp_file_path,
extension=original_file_extension,
title=title,
author=author,
cover=pdf_preview(tmp_file_path, original_file_name),
description=subject,
tags="",
series="",
series_id="",
languages="",
)
|
https://github.com/janeczku/calibre-web/issues/20
|
ERROR:cps.web:Exception on /book/125 [GET]
Traceback (most recent call last):
File "/Users/jgillman/Code/calibre-web/vendor/flask/app.py", line 1817, in wsgi_app
response = self.full_dispatch_request()
File "/Users/jgillman/Code/calibre-web/vendor/flask/app.py", line 1477, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/Users/jgillman/Code/calibre-web/vendor/flask/app.py", line 1381, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/Users/jgillman/Code/calibre-web/vendor/flask/app.py", line 1475, in full_dispatch_request
rv = self.dispatch_request()
File "/Users/jgillman/Code/calibre-web/vendor/flask/app.py", line 1461, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/jgillman/Code/calibre-web/vendor/flask_login.py", line 717, in decorated_view
return func(*args, **kwargs)
File "/Users/jgillman/Code/calibre-web/cps/web.py", line 381, in show_book
return render_template('detail.html', entry=entries, cc=cc, title=entries.title, books_shelfs=book_in_shelfs)
File "/Users/jgillman/Code/calibre-web/vendor/flask/templating.py", line 128, in render_template
context, ctx.app)
File "/Users/jgillman/Code/calibre-web/vendor/flask/templating.py", line 110, in _render
rv = template.render(context)
File "/Users/jgillman/Code/calibre-web/vendor/jinja2/environment.py", line 969, in render
return self.environment.handle_exception(exc_info, True)
File "/Users/jgillman/Code/calibre-web/vendor/jinja2/environment.py", line 742, in handle_exception
reraise(exc_type, exc_value, tb)
File "/Users/jgillman/Code/calibre-web/cps/templates/detail.html", line 1, in top-level template code
{% extends "layout.html" %}
File "/Users/jgillman/Code/calibre-web/cps/templates/layout.html", line 136, in top-level template code
{% block body %}{% endblock %}
File "/Users/jgillman/Code/calibre-web/cps/templates/detail.html", line 66, in block "body"
{% if entry['custom_column_' ~ c.id]|length > 0 %}
File "/Users/jgillman/Code/calibre-web/vendor/jinja2/environment.py", line 387, in getitem
return getattr(obj, attr)
File "/Users/jgillman/Code/calibre-web/vendor/sqlalchemy/orm/attributes.py", line 316, in __get__
return self.impl.get(instance_state(instance), dict_)
File "/Users/jgillman/Code/calibre-web/vendor/sqlalchemy/orm/attributes.py", line 613, in get
value = self.callable_(state, passive)
File "/Users/jgillman/Code/calibre-web/vendor/sqlalchemy/orm/strategies.py", line 524, in _load_for_state
return self._emit_lazyload(session, state, ident_key, passive)
File "/Users/jgillman/Code/calibre-web/vendor/sqlalchemy/orm/strategies.py", line 585, in _emit_lazyload
result = q.all()
File "/Users/jgillman/Code/calibre-web/vendor/sqlalchemy/orm/query.py", line 2241, in all
return list(self)
File "/Users/jgillman/Code/calibre-web/vendor/sqlalchemy/orm/query.py", line 2353, in __iter__
return self._execute_and_instances(context)
File "/Users/jgillman/Code/calibre-web/vendor/sqlalchemy/orm/query.py", line 2368, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/Users/jgillman/Code/calibre-web/vendor/sqlalchemy/engine/base.py", line 662, in execute
params)
File "/Users/jgillman/Code/calibre-web/vendor/sqlalchemy/engine/base.py", line 761, in _execute_clauseelement
compiled_sql, distilled_params
File "/Users/jgillman/Code/calibre-web/vendor/sqlalchemy/engine/base.py", line 874, in _execute_context
context)
File "/Users/jgillman/Code/calibre-web/vendor/sqlalchemy/engine/base.py", line 1024, in _handle_dbapi_exception
exc_info
File "/Users/jgillman/Code/calibre-web/vendor/sqlalchemy/util/compat.py", line 196, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb)
File "/Users/jgillman/Code/calibre-web/vendor/sqlalchemy/engine/base.py", line 867, in _execute_context
context)
File "/Users/jgillman/Code/calibre-web/vendor/sqlalchemy/engine/default.py", line 324, in do_execute
cursor.execute(statement, parameters)
OperationalError: (OperationalError) no such table: books_custom_column_1_link u'SELECT custom_column_1.id AS custom_column_1_id, custom_column_1.value AS custom_column_1_value \
nFROM custom_column_1, books_custom_column_1_link \nWHERE ? = books_custom_column_1_link.book AND custom_column_1.id = books_custom_column_1_link.value' (125,)
ERROR:tornado.access:500 GET /book/125 (::1) 89.60ms
|
OperationalError
|
def pdf_preview(tmp_file_path, tmp_dir):
if use_generic_pdf_cover:
return None
else:
try:
cover_file_name = os.path.splitext(tmp_file_path)[0] + ".cover.jpg"
with Image(filename=tmp_file_path + "[0]", resolution=150) as img:
img.compression_quality = 88
img.save(filename=os.path.join(tmp_dir, cover_file_name))
return cover_file_name
except PolicyError as ex:
logger.warning("Pdf extraction forbidden by Imagemagick policy: %s", ex)
return None
except Exception as ex:
logger.warning("Cannot extract cover image, using default: %s", ex)
return None
|
def pdf_preview(tmp_file_path, tmp_dir):
if use_generic_pdf_cover:
return None
else:
cover_file_name = os.path.splitext(tmp_file_path)[0] + ".cover.jpg"
with Image(filename=tmp_file_path + "[0]", resolution=150) as img:
img.compression_quality = 88
img.save(filename=os.path.join(tmp_dir, cover_file_name))
return cover_file_name
|
https://github.com/janeczku/calibre-web/issues/20
|
ERROR:cps.web:Exception on /book/125 [GET]
Traceback (most recent call last):
File "/Users/jgillman/Code/calibre-web/vendor/flask/app.py", line 1817, in wsgi_app
response = self.full_dispatch_request()
File "/Users/jgillman/Code/calibre-web/vendor/flask/app.py", line 1477, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/Users/jgillman/Code/calibre-web/vendor/flask/app.py", line 1381, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/Users/jgillman/Code/calibre-web/vendor/flask/app.py", line 1475, in full_dispatch_request
rv = self.dispatch_request()
File "/Users/jgillman/Code/calibre-web/vendor/flask/app.py", line 1461, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/jgillman/Code/calibre-web/vendor/flask_login.py", line 717, in decorated_view
return func(*args, **kwargs)
File "/Users/jgillman/Code/calibre-web/cps/web.py", line 381, in show_book
return render_template('detail.html', entry=entries, cc=cc, title=entries.title, books_shelfs=book_in_shelfs)
File "/Users/jgillman/Code/calibre-web/vendor/flask/templating.py", line 128, in render_template
context, ctx.app)
File "/Users/jgillman/Code/calibre-web/vendor/flask/templating.py", line 110, in _render
rv = template.render(context)
File "/Users/jgillman/Code/calibre-web/vendor/jinja2/environment.py", line 969, in render
return self.environment.handle_exception(exc_info, True)
File "/Users/jgillman/Code/calibre-web/vendor/jinja2/environment.py", line 742, in handle_exception
reraise(exc_type, exc_value, tb)
File "/Users/jgillman/Code/calibre-web/cps/templates/detail.html", line 1, in top-level template code
{% extends "layout.html" %}
File "/Users/jgillman/Code/calibre-web/cps/templates/layout.html", line 136, in top-level template code
{% block body %}{% endblock %}
File "/Users/jgillman/Code/calibre-web/cps/templates/detail.html", line 66, in block "body"
{% if entry['custom_column_' ~ c.id]|length > 0 %}
File "/Users/jgillman/Code/calibre-web/vendor/jinja2/environment.py", line 387, in getitem
return getattr(obj, attr)
File "/Users/jgillman/Code/calibre-web/vendor/sqlalchemy/orm/attributes.py", line 316, in __get__
return self.impl.get(instance_state(instance), dict_)
File "/Users/jgillman/Code/calibre-web/vendor/sqlalchemy/orm/attributes.py", line 613, in get
value = self.callable_(state, passive)
File "/Users/jgillman/Code/calibre-web/vendor/sqlalchemy/orm/strategies.py", line 524, in _load_for_state
return self._emit_lazyload(session, state, ident_key, passive)
File "/Users/jgillman/Code/calibre-web/vendor/sqlalchemy/orm/strategies.py", line 585, in _emit_lazyload
result = q.all()
File "/Users/jgillman/Code/calibre-web/vendor/sqlalchemy/orm/query.py", line 2241, in all
return list(self)
File "/Users/jgillman/Code/calibre-web/vendor/sqlalchemy/orm/query.py", line 2353, in __iter__
return self._execute_and_instances(context)
File "/Users/jgillman/Code/calibre-web/vendor/sqlalchemy/orm/query.py", line 2368, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/Users/jgillman/Code/calibre-web/vendor/sqlalchemy/engine/base.py", line 662, in execute
params)
File "/Users/jgillman/Code/calibre-web/vendor/sqlalchemy/engine/base.py", line 761, in _execute_clauseelement
compiled_sql, distilled_params
File "/Users/jgillman/Code/calibre-web/vendor/sqlalchemy/engine/base.py", line 874, in _execute_context
context)
File "/Users/jgillman/Code/calibre-web/vendor/sqlalchemy/engine/base.py", line 1024, in _handle_dbapi_exception
exc_info
File "/Users/jgillman/Code/calibre-web/vendor/sqlalchemy/util/compat.py", line 196, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb)
File "/Users/jgillman/Code/calibre-web/vendor/sqlalchemy/engine/base.py", line 867, in _execute_context
context)
File "/Users/jgillman/Code/calibre-web/vendor/sqlalchemy/engine/default.py", line 324, in do_execute
cursor.execute(statement, parameters)
OperationalError: (OperationalError) no such table: books_custom_column_1_link u'SELECT custom_column_1.id AS custom_column_1_id, custom_column_1.value AS custom_column_1_value \
nFROM custom_column_1, books_custom_column_1_link \nWHERE ? = books_custom_column_1_link.book AND custom_column_1.id = books_custom_column_1_link.value' (125,)
ERROR:tornado.access:500 GET /book/125 (::1) 89.60ms
|
OperationalError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.