after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def any(a, axis=None, out=None, keepdims=False):
assert isinstance(a, cupy.ndarray)
return a.any(axis=axis, out=out, keepdims=keepdims)
|
def any(a, axis=None, out=None, keepdims=False):
# TODO(okuta): check type
return a.any(axis=axis, out=out, keepdims=keepdims)
|
https://github.com/cupy/cupy/issues/266
|
np.empty((0, 1)).argmax(axis=1) # array([], dtype=int64)
cupy.empty((0, 1)).argmax(axis=1)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-9-a5737d72bcba> in <module>()
----> 1 cupy.empty((0, 1)).argmax(axis=1)
cupy/core/core.pyx in cupy.core.core.ndarray.argmax (cupy/core/core.cpp:17701)()
cupy/core/core.pyx in cupy.core.core.ndarray.argmax (cupy/core/core.cpp:17556)()
cupy/core/reduction.pxi in cupy.core.core.simple_reduction_function.__call__ (cupy/core/core.cpp:52697)()
ValueError: zero-size array to reduction operation cupy_argmax which has no identity
|
ValueError
|
def nonzero(a):
"""Return the indices of the elements that are non-zero.
Returns a tuple of arrays, one for each dimension of a,
containing the indices of the non-zero elements in that dimension.
Args:
a (cupy.ndarray): array
Returns:
tuple of arrays: Indices of elements that are non-zero.
.. seealso:: :func:`numpy.nonzero`
"""
assert isinstance(a, core.ndarray)
return a.nonzero()
|
def nonzero(a):
"""Return the indices of the elements that are non-zero.
Returns a tuple of arrays, one for each dimension of a,
containing the indices of the non-zero elements in that dimension.
Args:
a (cupy.ndarray): array
Returns:
tuple of arrays: Indices of elements that are non-zero.
.. seealso:: :func:`numpy.nonzero`
"""
return a.nonzero()
|
https://github.com/cupy/cupy/issues/252
|
numpy
shape=() => (array([0]),)
shape=(0,) => (array([], dtype=int64),)
shape=(1,) => (array([0]),)
shape=(0, 2) => (array([], dtype=int64), array([], dtype=int64))
shape=(0, 0, 2, 0) => (array([], dtype=int64), array([], dtype=int64), array([], dtype=int64), array([], dtype=int64))
cupy
shape=() => (array([0]),)
shape=(0,) => FAIL: CUDA_ERROR_INVALID_VALUE: invalid argument
shape=(1,) => (array([0]),)
shape=(0, 2) => FAIL: CUDA_ERROR_INVALID_VALUE: invalid argument
shape=(0, 0, 2, 0) => FAIL: CUDA_ERROR_INVALID_VALUE: invalid argument
Traceback (most recent call last):
File "test-nonzero.py", line 26, in <module>
cupy.nonzero(cupy.ones((0,)))
File "/niboshi/repos/cupy/cupy/sorting/search.py", line 72, in nonzero
return a.nonzero()
File "cupy/core/core.pyx", line 810, in cupy.core.core.ndarray.nonzero (cupy/core/core.cpp:16210)
scan_index = scan(condition.astype(dtype).ravel())
File "cupy/core/core.pyx", line 3883, in cupy.core.core.scan (cupy/core/core.cpp:83826)
kern_scan(grid=((a.size - 1) // (2 * block_size) + 1,),
File "cupy/cuda/function.pyx", line 118, in cupy.cuda.function.Function.__call__ (cupy/cuda/function.cpp:3794)
_launch(
File "cupy/cuda/function.pyx", line 100, in cupy.cuda.function._launch (cupy/cuda/function.cpp:3431)
driver.launchKernel(
File "cupy/cuda/driver.pyx", line 170, in cupy.cuda.driver.launchKernel (cupy/cuda/driver.cpp:3262)
check_status(status)
File "cupy/cuda/driver.pyx", line 70, in cupy.cuda.driver.check_status (cupy/cuda/driver.cpp:1481)
raise CUDADriverError(status)
cupy.cuda.driver.CUDADriverError: CUDA_ERROR_INVALID_VALUE: invalid argument
|
cupy.cuda.driver.CUDADriverError
|
def flatnonzero(a):
"""Return indices that are non-zero in the flattened version of a.
This is equivalent to a.ravel().nonzero()[0].
Args:
a (cupy.ndarray): input array
Returns:
cupy.ndarray: Output array,
containing the indices of the elements of a.ravel() that are non-zero.
.. seealso:: :func:`numpy.flatnonzero`
"""
assert isinstance(a, core.ndarray)
return a.ravel().nonzero()[0]
|
def flatnonzero(a):
"""Return indices that are non-zero in the flattened version of a.
This is equivalent to a.ravel().nonzero()[0].
Args:
a (cupy.ndarray): input array
Returns:
cupy.ndarray: Output array,
containing the indices of the elements of a.ravel() that are non-zero.
.. seealso:: :func:`numpy.flatnonzero`
"""
return a.ravel().nonzero()[0]
|
https://github.com/cupy/cupy/issues/252
|
numpy
shape=() => (array([0]),)
shape=(0,) => (array([], dtype=int64),)
shape=(1,) => (array([0]),)
shape=(0, 2) => (array([], dtype=int64), array([], dtype=int64))
shape=(0, 0, 2, 0) => (array([], dtype=int64), array([], dtype=int64), array([], dtype=int64), array([], dtype=int64))
cupy
shape=() => (array([0]),)
shape=(0,) => FAIL: CUDA_ERROR_INVALID_VALUE: invalid argument
shape=(1,) => (array([0]),)
shape=(0, 2) => FAIL: CUDA_ERROR_INVALID_VALUE: invalid argument
shape=(0, 0, 2, 0) => FAIL: CUDA_ERROR_INVALID_VALUE: invalid argument
Traceback (most recent call last):
File "test-nonzero.py", line 26, in <module>
cupy.nonzero(cupy.ones((0,)))
File "/niboshi/repos/cupy/cupy/sorting/search.py", line 72, in nonzero
return a.nonzero()
File "cupy/core/core.pyx", line 810, in cupy.core.core.ndarray.nonzero (cupy/core/core.cpp:16210)
scan_index = scan(condition.astype(dtype).ravel())
File "cupy/core/core.pyx", line 3883, in cupy.core.core.scan (cupy/core/core.cpp:83826)
kern_scan(grid=((a.size - 1) // (2 * block_size) + 1,),
File "cupy/cuda/function.pyx", line 118, in cupy.cuda.function.Function.__call__ (cupy/cuda/function.cpp:3794)
_launch(
File "cupy/cuda/function.pyx", line 100, in cupy.cuda.function._launch (cupy/cuda/function.cpp:3431)
driver.launchKernel(
File "cupy/cuda/driver.pyx", line 170, in cupy.cuda.driver.launchKernel (cupy/cuda/driver.cpp:3262)
check_status(status)
File "cupy/cuda/driver.pyx", line 70, in cupy.cuda.driver.check_status (cupy/cuda/driver.cpp:1481)
raise CUDADriverError(status)
cupy.cuda.driver.CUDADriverError: CUDA_ERROR_INVALID_VALUE: invalid argument
|
cupy.cuda.driver.CUDADriverError
|
def _get_positive_axis(ndim, axis):
a = axis
if a < 0:
a += ndim
if a < 0 or a >= ndim:
raise core.core._AxisError("axis {} out of bounds [0, {})".format(axis, ndim))
return a
|
def _get_positive_axis(ndim, axis):
a = axis
if a < 0:
a += ndim
if a < 0 or a >= ndim:
raise IndexError("axis {} out of bounds [0, {})".format(axis, ndim))
return a
|
https://github.com/cupy/cupy/issues/342
|
import cupy
cupy.cumprod
<function cumprod at 0x7f9a460b4c80>
cupy.cumprod(cupy.ndarray(()), axis=-10000)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/path/to/cupy/math/sumprod.py", line 206, in cumprod
raise numpy.AxisError('axis(={}) out of bounds'.format(axis))
AttributeError: module 'numpy' has no attribute 'AxisError'
|
AttributeError
|
def roll(a, shift, axis=None):
"""Roll array elements along a given axis.
Args:
a (~cupy.ndarray): Array to be rolled.
shift (int): The number of places by which elements are shifted.
axis (int or None): The axis along which elements are shifted.
If ``axis`` is ``None``, the array is flattened before shifting,
and after that it is reshaped to the original shape.
Returns:
~cupy.ndarray: Output array.
.. seealso:: :func:`numpy.roll`
"""
if axis is None:
if a.size == 0:
return a
size = a.size
ra = a.ravel()
shift %= size
res = cupy.empty((size,), a.dtype)
res[:shift] = ra[size - shift :]
res[shift:] = ra[: size - shift]
return res.reshape(a.shape)
else:
axis = int(axis)
if axis < 0:
axis += a.ndim
if not 0 <= axis < a.ndim:
raise core.core._AxisError(
"axis must be >= %d and < %d" % (-a.ndim, a.ndim)
)
size = a.shape[axis]
if size == 0:
return a
shift %= size
prev = (slice(None),) * axis
rest = (slice(None),) * (a.ndim - axis - 1)
# Roll only the dimensiont at the given axis
# ind1 is [:, ..., size-shift:, ..., :]
# ind2 is [:, ..., :size-shift, ..., :]
ind1 = prev + (slice(size - shift, None, None),) + rest
ind2 = prev + (slice(None, size - shift, None),) + rest
r_ind1 = prev + (slice(None, shift, None),) + rest
r_ind2 = prev + (slice(shift, None, None),) + rest
res = cupy.empty_like(a)
res[r_ind1] = a[ind1]
res[r_ind2] = a[ind2]
return res
|
def roll(a, shift, axis=None):
"""Roll array elements along a given axis.
Args:
a (~cupy.ndarray): Array to be rolled.
shift (int): The number of places by which elements are shifted.
axis (int or None): The axis along which elements are shifted.
If ``axis`` is ``None``, the array is flattened before shifting,
and after that it is reshaped to the original shape.
Returns:
~cupy.ndarray: Output array.
.. seealso:: :func:`numpy.roll`
"""
if axis is None:
if a.size == 0:
return a
size = a.size
ra = a.ravel()
shift %= size
res = cupy.empty((size,), a.dtype)
res[:shift] = ra[size - shift :]
res[shift:] = ra[: size - shift]
return res.reshape(a.shape)
else:
axis = int(axis)
if axis < 0:
axis += a.ndim
if not 0 <= axis < a.ndim:
raise ValueError("axis must be >= %d and < %d" % (-a.ndim, a.ndim))
size = a.shape[axis]
if size == 0:
return a
shift %= size
prev = (slice(None),) * axis
rest = (slice(None),) * (a.ndim - axis - 1)
# Roll only the dimensiont at the given axis
# ind1 is [:, ..., size-shift:, ..., :]
# ind2 is [:, ..., :size-shift, ..., :]
ind1 = prev + (slice(size - shift, None, None),) + rest
ind2 = prev + (slice(None, size - shift, None),) + rest
r_ind1 = prev + (slice(None, shift, None),) + rest
r_ind2 = prev + (slice(shift, None, None),) + rest
res = cupy.empty_like(a)
res[r_ind1] = a[ind1]
res[r_ind2] = a[ind2]
return res
|
https://github.com/cupy/cupy/issues/342
|
import cupy
cupy.cumprod
<function cumprod at 0x7f9a460b4c80>
cupy.cumprod(cupy.ndarray(()), axis=-10000)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/path/to/cupy/math/sumprod.py", line 206, in cumprod
raise numpy.AxisError('axis(={}) out of bounds'.format(axis))
AttributeError: module 'numpy' has no attribute 'AxisError'
|
AttributeError
|
def cumsum(a, axis=None, dtype=None, out=None):
"""Returns the cumulative sum of an array along a given axis.
Args:
a (cupy.ndarray): Input array.
axis (int): Axis along which the cumulative sum is taken. If it is not
specified, the input is flattened.
dtype: Data type specifier.
out (cupy.ndarray): Output array.
Returns:
cupy.ndarray: The result array.
.. seealso:: :func:`numpy.cumsum`
"""
if out is None:
if dtype is None:
kind = a.dtype.kind
if kind == "b":
dtype = numpy.dtype("l")
elif kind == "i" and a.dtype.itemsize < numpy.dtype("l").itemsize:
dtype = numpy.dtype("l")
elif kind == "u" and a.dtype.itemsize < numpy.dtype("L").itemsize:
dtype = numpy.dtype("L")
else:
dtype = a.dtype
out = a.astype(dtype)
else:
out[...] = a
if axis is None:
out = out.ravel()
elif not (-a.ndim <= axis < a.ndim):
raise core.core._AxisError("axis(={}) out of bounds".format(axis))
else:
return _proc_as_batch(_cumsum_batch, out, axis=axis)
kern = core.ElementwiseKernel(
"int32 pos",
"raw T x",
"""
if (i & pos) {
x[i] += x[i ^ pos | (pos - 1)];
}
""",
"cumsum_kernel",
)
pos = 1
while pos < out.size:
kern(pos, out, size=out.size)
pos <<= 1
return out
|
def cumsum(a, axis=None, dtype=None, out=None):
"""Returns the cumulative sum of an array along a given axis.
Args:
a (cupy.ndarray): Input array.
axis (int): Axis along which the cumulative sum is taken. If it is not
specified, the input is flattened.
dtype: Data type specifier.
out (cupy.ndarray): Output array.
Returns:
cupy.ndarray: The result array.
.. seealso:: :func:`numpy.cumsum`
"""
if out is None:
if dtype is None:
kind = a.dtype.kind
if kind == "b":
dtype = numpy.dtype("l")
elif kind == "i" and a.dtype.itemsize < numpy.dtype("l").itemsize:
dtype = numpy.dtype("l")
elif kind == "u" and a.dtype.itemsize < numpy.dtype("L").itemsize:
dtype = numpy.dtype("L")
else:
dtype = a.dtype
out = a.astype(dtype)
else:
out[...] = a
if axis is None:
out = out.ravel()
elif not (-a.ndim <= axis < a.ndim):
raise ValueError("axis(={}) out of bounds".format(axis))
else:
return _proc_as_batch(_cumsum_batch, out, axis=axis)
kern = core.ElementwiseKernel(
"int32 pos",
"raw T x",
"""
if (i & pos) {
x[i] += x[i ^ pos | (pos - 1)];
}
""",
"cumsum_kernel",
)
pos = 1
while pos < out.size:
kern(pos, out, size=out.size)
pos <<= 1
return out
|
https://github.com/cupy/cupy/issues/342
|
import cupy
cupy.cumprod
<function cumprod at 0x7f9a460b4c80>
cupy.cumprod(cupy.ndarray(()), axis=-10000)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/path/to/cupy/math/sumprod.py", line 206, in cumprod
raise numpy.AxisError('axis(={}) out of bounds'.format(axis))
AttributeError: module 'numpy' has no attribute 'AxisError'
|
AttributeError
|
def cumprod(a, axis=None, dtype=None, out=None):
"""Returns the cumulative product of an array along a given axis.
Args:
a (cupy.ndarray): Input array.
axis (int): Axis along which the cumulative product is taken. If it is
not specified, the input is flattened.
dtype: Data type specifier.
out (cupy.ndarray): Output array.
Returns:
cupy.ndarray: The result array.
.. seealso:: :func:`numpy.cumprod`
"""
if out is None:
if dtype is None:
kind = a.dtype.kind
if kind == "b":
dtype = numpy.dtype("l")
elif kind == "i" and a.dtype.itemsize < numpy.dtype("l").itemsize:
dtype = numpy.dtype("l")
elif kind == "u" and a.dtype.itemsize < numpy.dtype("L").itemsize:
dtype = numpy.dtype("L")
else:
dtype = a.dtype
out = a.astype(dtype)
else:
out[...] = a
if axis is None:
out = out.ravel()
elif not (-a.ndim <= axis < a.ndim):
raise core.core._AxisError("axis(={}) out of bounds".format(axis))
else:
return _proc_as_batch(_cumprod_batch, out, axis=axis)
kern = core.ElementwiseKernel(
"int64 pos",
"raw T x",
"""
if (i & pos) {
x[i] *= x[i ^ pos | (pos - 1)];
}
""",
"cumprod_kernel",
)
pos = 1
while pos < out.size:
kern(pos, out, size=out.size)
pos <<= 1
return out
|
def cumprod(a, axis=None, dtype=None, out=None):
"""Returns the cumulative product of an array along a given axis.
Args:
a (cupy.ndarray): Input array.
axis (int): Axis along which the cumulative product is taken. If it is
not specified, the input is flattened.
dtype: Data type specifier.
out (cupy.ndarray): Output array.
Returns:
cupy.ndarray: The result array.
.. seealso:: :func:`numpy.cumprod`
"""
if out is None:
if dtype is None:
kind = a.dtype.kind
if kind == "b":
dtype = numpy.dtype("l")
elif kind == "i" and a.dtype.itemsize < numpy.dtype("l").itemsize:
dtype = numpy.dtype("l")
elif kind == "u" and a.dtype.itemsize < numpy.dtype("L").itemsize:
dtype = numpy.dtype("L")
else:
dtype = a.dtype
out = a.astype(dtype)
else:
out[...] = a
if axis is None:
out = out.ravel()
elif not (-a.ndim <= axis < a.ndim):
raise ValueError("axis(={}) out of bounds".format(axis))
else:
return _proc_as_batch(_cumprod_batch, out, axis=axis)
kern = core.ElementwiseKernel(
"int64 pos",
"raw T x",
"""
if (i & pos) {
x[i] *= x[i ^ pos | (pos - 1)];
}
""",
"cumprod_kernel",
)
pos = 1
while pos < out.size:
kern(pos, out, size=out.size)
pos <<= 1
return out
|
https://github.com/cupy/cupy/issues/342
|
import cupy
cupy.cumprod
<function cumprod at 0x7f9a460b4c80>
cupy.cumprod(cupy.ndarray(()), axis=-10000)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/path/to/cupy/math/sumprod.py", line 206, in cumprod
raise numpy.AxisError('axis(={}) out of bounds'.format(axis))
AttributeError: module 'numpy' has no attribute 'AxisError'
|
AttributeError
|
def bincount(x, weights=None, minlength=None):
"""Count number of occurrences of each value in array of non-negative ints.
Args:
x (cupy.ndarray): Input array.
weights (cupy.ndarray): Weights array which has the same shape as
``x``.
minlength (int): A minimum number of bins for the output array.
Returns:
cupy.ndarray: The result of binning the input array. The length of
output is equal to ``max(cupy.max(x) + 1, minlength)``.
.. seealso:: :func:`numpy.bincount`
"""
if x.ndim > 1:
raise ValueError("object too deep for desired array")
if x.ndim < 1:
raise ValueError("object of too small depth for desired array")
if x.dtype.kind == "f":
raise TypeError("x must be int array")
if (x < 0).any():
raise ValueError("The first argument of bincount must be non-negative")
if weights is not None and x.shape != weights.shape:
raise ValueError("The weights and list don't have the same length.")
if minlength is not None:
minlength = int(minlength)
if minlength < 0:
raise ValueError("minlength must be non-negative")
size = int(cupy.max(x)) + 1
if minlength is not None:
size = max(size, minlength)
if weights is None:
# atomicAdd for int64 is not provided
b = cupy.zeros((size,), dtype=cupy.int32)
cupy.ElementwiseKernel(
"S x", "raw U bin", "atomicAdd(&bin[x], 1)", "bincount_kernel"
)(x, b)
b = b.astype(numpy.intp)
else:
# atomicAdd for float64 is not provided
b = cupy.zeros((size,), dtype=cupy.float32)
cupy.ElementwiseKernel(
"S x, T w",
"raw U bin",
"atomicAdd(&bin[x], w)",
"bincount_with_weight_kernel",
)(x, weights, b)
b = b.astype(cupy.float64)
return b
|
def bincount(x, weights=None, minlength=None):
"""Count number of occurrences of each value in array of non-negative ints.
Args:
x (cupy.ndarray): Input array.
weights (cupy.ndarray): Weights array which has the same shape as
``x``.
minlength (int): A minimum number of bins for the output array.
Returns:
cupy.ndarray: The result of binning the input array. The length of
output is equal to ``max(cupy.max(x) + 1, minlength)``.
.. seealso:: :func:`numpy.bincount`
"""
if x.ndim > 1:
raise ValueError("object too deep for desired array")
if x.ndim < 1:
raise ValueError("object of too small depth for desired array")
if x.dtype.kind == "f":
raise TypeError("x must be int array")
if (x < 0).any():
raise ValueError("The first argument of bincount must be non-negative")
if weights is not None and x.shape != weights.shape:
raise ValueError("The weights and list don't have the same length.")
if minlength is not None:
minlength = int(minlength)
if minlength <= 0:
raise ValueError("minlength must be positive")
size = int(cupy.max(x)) + 1
if minlength is not None:
size = max(size, minlength)
if weights is None:
# atomicAdd for int64 is not provided
b = cupy.zeros((size,), dtype=cupy.int32)
cupy.ElementwiseKernel(
"S x", "raw U bin", "atomicAdd(&bin[x], 1)", "bincount_kernel"
)(x, b)
b = b.astype(numpy.intp)
else:
# atomicAdd for float64 is not provided
b = cupy.zeros((size,), dtype=cupy.float32)
cupy.ElementwiseKernel(
"S x, T w",
"raw U bin",
"atomicAdd(&bin[x], w)",
"bincount_with_weight_kernel",
)(x, weights, b)
b = b.astype(cupy.float64)
return b
|
https://github.com/cupy/cupy/issues/342
|
import cupy
cupy.cumprod
<function cumprod at 0x7f9a460b4c80>
cupy.cumprod(cupy.ndarray(()), axis=-10000)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/path/to/cupy/math/sumprod.py", line 206, in cumprod
raise numpy.AxisError('axis(={}) out of bounds'.format(axis))
AttributeError: module 'numpy' has no attribute 'AxisError'
|
AttributeError
|
def _convert_from_ufunc(ufunc):
nin = ufunc.nin
nout = ufunc.nout
def get_mem(args):
for i in args:
if type(i) == _FusionRef:
return i._mem
raise Exception("number of ndarray arguments must be more than 0")
def can_cast1(args, ty_ins):
for i in six.moves.range(nin):
if args[i].const is None:
if not numpy.can_cast(args[i].ty, ty_ins[i]):
return False
else:
if not numpy.can_cast(args[i].const, ty_ins[i]):
return False
return True
def can_cast2(args, ty_ins):
for i in six.moves.range(nin):
if not numpy.can_cast(args[i].ty, ty_ins[i]):
return False
return True
def res(*args, **kwargs):
mem = get_mem(args)
var_list = [_normalize_arg(_, mem) for _ in args]
if "out" in kwargs:
var_list.append(_normalize_arg(kwargs.pop("out"), mem))
if kwargs:
raise TypeError("Wrong arguments %s" % kwargs)
assert nin <= len(var_list) <= nin + nout
in_vars = var_list[:nin]
out_vars = var_list[nin:]
can_cast = can_cast1 if _should_use_min_scalar(in_vars) else can_cast2
for ty_ins, ty_outs, op in ufunc._ops:
ty_ins = [numpy.dtype(_) for _ in ty_ins]
ty_outs = [numpy.dtype(_) for _ in ty_outs]
if can_cast(in_vars, ty_ins):
param_names = ["in%d" % i for i in six.moves.range(nin)] + [
"out%d" % i for i in six.moves.range(nout)
]
ret = []
for i in six.moves.range(nout):
if i >= len(out_vars):
v = mem.get_fresh(ty_outs[i])
out_vars.append(v)
ret.append(_FusionRef(v, mem))
elif numpy.can_cast(ty_outs[i], out_vars[i].ty, "same_kind"):
v = out_vars[i]
ret.append(_FusionRef(v, mem))
else:
raise TypeError(
"output (typecode '{}') could not be coerced "
"to provided output parameter (typecode '{}') "
"according to the casting rule "
'"same_kind"'.format(ty_outs[i].char, out_vars[i].ty.char)
)
mem.set_op(
ufunc.name,
op,
param_names,
nin,
nout,
in_vars,
out_vars,
ty_ins + ty_outs,
)
return ret[0] if len(ret) == 1 else tuple(ret)
raise TypeError(
"Invalid type cast in '{}': {} -> {}".format(
ufunc.name, [_.ty for _ in in_vars], [_.ty for _ in out_vars]
)
)
return res
|
def _convert_from_ufunc(ufunc):
nin = ufunc.nin
nout = ufunc.nout
def get_mem(args):
for i in args:
if type(i) == _FusionRef:
return i._mem
raise Exception("number of ndarray arguments must be more than 0")
def can_cast1(args, ty_ins):
for i in six.moves.range(nin):
if args[i].const is None:
if not numpy.can_cast(args[i].ty, ty_ins[i]):
return False
else:
if not numpy.can_cast(args[i].const, ty_ins[i]):
return False
return True
def can_cast2(args, ty_ins):
for i in six.moves.range(nin):
if not numpy.can_cast(args[i].ty, ty_ins[i]):
return False
return True
def res(*args, **kwargs):
mem = get_mem(args)
var_list = [_normalize_arg(_, mem) for _ in args]
if "out" in kwargs:
var_list.append(_normalize_arg.pop("out"))
if kwargs:
raise TypeError("Wrong arguments %s" % kwargs)
assert nin <= len(var_list) and len(var_list) <= nin + nout
in_vars = var_list[:nin]
out_vars = var_list[nin:]
can_cast = can_cast1 if _should_use_min_scalar(in_vars) else can_cast2
for ty_ins, ty_outs, op in ufunc._ops:
ty_ins = [numpy.dtype(_) for _ in ty_ins]
ty_outs = [numpy.dtype(_) for _ in ty_outs]
if can_cast(in_vars, ty_ins):
param_names = ["in%d" % i for i in six.moves.range(nin)] + [
"out%d" % i for i in six.moves.range(nout)
]
ret = []
for i in six.moves.range(nout):
if i >= len(out_vars):
v = mem.get_fresh(ty_outs[i])
out_vars.append(v)
ret.append(_FusionRef(v, mem))
elif numpy.can_cast(ty_outs[i], out_vars[i].ty, "same_kind"):
v = out_vars[i]
ret.append(_FusionRef(v, mem))
else:
raise TypeError(
"Cannot cast from %s to %s" % (ty_outs[i], out_vars[i].ty)
+ " with casting rule 'same_kind'"
)
mem.set_op(
ufunc.name,
op,
param_names,
nin,
nout,
in_vars,
out_vars,
ty_ins + ty_outs,
)
return ret[0] if len(ret) == 1 else tuple(ret)
raise TypeError(
"Invalid type cast in '{}': {} -> {}".format(
ufunc.name, [_.ty for _ in in_vars], [_.ty for _ in out_vars]
)
)
return res
|
https://github.com/cupy/cupy/issues/209
|
Traceback (most recent call last):
File "test-out.py", line 13, in <module>
func(a, b, z)
File "/repos/cupy/cupy/core/fusion.py", line 602, in __call__
return self._call(*args, **kwargs)
File "/repos/cupy/cupy/core/fusion.py", line 628, in _call
self.post_map, self.identity, types)
File "/repos/cupy/cupy/core/fusion.py", line 499, in _get_fusion
out_refs = func(*in_refs)
File "test-out.py", line 11, in func
xp.add(a, b, out=z)
File "/repos/cupy/cupy/core/fusion.py", line 710, in __call__
return _convert(self._fusion_op)(*args, **kwargs)
File "/repos/cupy/cupy/core/fusion.py", line 339, in res
var_list.append(_normalize_arg.pop('out'))
AttributeError: 'function' object has no attribute 'pop'
|
AttributeError
|
def res(*args, **kwargs):
mem = get_mem(args)
var_list = [_normalize_arg(_, mem) for _ in args]
if "out" in kwargs:
var_list.append(_normalize_arg(kwargs.pop("out"), mem))
if kwargs:
raise TypeError("Wrong arguments %s" % kwargs)
assert nin <= len(var_list) <= nin + nout
in_vars = var_list[:nin]
out_vars = var_list[nin:]
can_cast = can_cast1 if _should_use_min_scalar(in_vars) else can_cast2
for ty_ins, ty_outs, op in ufunc._ops:
ty_ins = [numpy.dtype(_) for _ in ty_ins]
ty_outs = [numpy.dtype(_) for _ in ty_outs]
if can_cast(in_vars, ty_ins):
param_names = ["in%d" % i for i in six.moves.range(nin)] + [
"out%d" % i for i in six.moves.range(nout)
]
ret = []
for i in six.moves.range(nout):
if i >= len(out_vars):
v = mem.get_fresh(ty_outs[i])
out_vars.append(v)
ret.append(_FusionRef(v, mem))
elif numpy.can_cast(ty_outs[i], out_vars[i].ty, "same_kind"):
v = out_vars[i]
ret.append(_FusionRef(v, mem))
else:
raise TypeError(
"output (typecode '{}') could not be coerced "
"to provided output parameter (typecode '{}') "
"according to the casting rule "
'"same_kind"'.format(ty_outs[i].char, out_vars[i].ty.char)
)
mem.set_op(
ufunc.name,
op,
param_names,
nin,
nout,
in_vars,
out_vars,
ty_ins + ty_outs,
)
return ret[0] if len(ret) == 1 else tuple(ret)
raise TypeError(
"Invalid type cast in '{}': {} -> {}".format(
ufunc.name, [_.ty for _ in in_vars], [_.ty for _ in out_vars]
)
)
|
def res(*args, **kwargs):
mem = get_mem(args)
var_list = [_normalize_arg(_, mem) for _ in args]
if "out" in kwargs:
var_list.append(_normalize_arg.pop("out"))
if kwargs:
raise TypeError("Wrong arguments %s" % kwargs)
assert nin <= len(var_list) and len(var_list) <= nin + nout
in_vars = var_list[:nin]
out_vars = var_list[nin:]
can_cast = can_cast1 if _should_use_min_scalar(in_vars) else can_cast2
for ty_ins, ty_outs, op in ufunc._ops:
ty_ins = [numpy.dtype(_) for _ in ty_ins]
ty_outs = [numpy.dtype(_) for _ in ty_outs]
if can_cast(in_vars, ty_ins):
param_names = ["in%d" % i for i in six.moves.range(nin)] + [
"out%d" % i for i in six.moves.range(nout)
]
ret = []
for i in six.moves.range(nout):
if i >= len(out_vars):
v = mem.get_fresh(ty_outs[i])
out_vars.append(v)
ret.append(_FusionRef(v, mem))
elif numpy.can_cast(ty_outs[i], out_vars[i].ty, "same_kind"):
v = out_vars[i]
ret.append(_FusionRef(v, mem))
else:
raise TypeError(
"Cannot cast from %s to %s" % (ty_outs[i], out_vars[i].ty)
+ " with casting rule 'same_kind'"
)
mem.set_op(
ufunc.name,
op,
param_names,
nin,
nout,
in_vars,
out_vars,
ty_ins + ty_outs,
)
return ret[0] if len(ret) == 1 else tuple(ret)
raise TypeError(
"Invalid type cast in '{}': {} -> {}".format(
ufunc.name, [_.ty for _ in in_vars], [_.ty for _ in out_vars]
)
)
|
https://github.com/cupy/cupy/issues/209
|
Traceback (most recent call last):
File "test-out.py", line 13, in <module>
func(a, b, z)
File "/repos/cupy/cupy/core/fusion.py", line 602, in __call__
return self._call(*args, **kwargs)
File "/repos/cupy/cupy/core/fusion.py", line 628, in _call
self.post_map, self.identity, types)
File "/repos/cupy/cupy/core/fusion.py", line 499, in _get_fusion
out_refs = func(*in_refs)
File "test-out.py", line 11, in func
xp.add(a, b, out=z)
File "/repos/cupy/cupy/core/fusion.py", line 710, in __call__
return _convert(self._fusion_op)(*args, **kwargs)
File "/repos/cupy/cupy/core/fusion.py", line 339, in res
var_list.append(_normalize_arg.pop('out'))
AttributeError: 'function' object has no attribute 'pop'
|
AttributeError
|
def _get_fusion(func, nin, reduce, post_map, identity, input_types, name=None):
in_vars = [_FusionVar(i, t) for i, t in enumerate(input_types)]
mem = _FusionMem(in_vars)
in_refs = [_FusionRef(_, mem) for _ in in_vars]
out_refs = func(*in_refs)
out_refs = list(out_refs) if type(out_refs) == tuple else [out_refs]
out_refs = [_ for _ in out_refs if _ is not None]
out_refs = [_FusionRef(_normalize_arg(_, mem), mem) for _ in out_refs]
out_vars = [_normalize_arg(copy(_), mem) for _ in out_refs]
nout = len(out_vars)
op_list = mem.op_list
tmpvars = mem.var_list[nin:-nout] if nout > 0 else mem.var_list[nin:]
in_params = ", ".join(_get_params(in_vars))
out_params = ", ".join(_get_params(out_vars))
operation = "".join(_get_declaration_from_var(_) for _ in tmpvars)
operation += "".join(_get_declaration_from_op(_) for _ in op_list)
operation += "\n".join(_get_operation_code(_) for _ in op_list)
if name is None:
name = "fusion__" + "__".join(build_kernel_name(_) for _ in op_list)
if reduce is None:
if not out_params:
in_params = ", ".join(_get_params(in_vars[:-1]))
out_params = ", ".join(_get_params([in_vars[-1]]))
submodules = _gather_submodules(op_list)
submodule_code = "".join(_get_submodule_code(_) for _ in submodules.values())
return core.ElementwiseKernel(
in_params, out_params, operation, preamble=submodule_code, name=name
)
else:
if nout != 1:
raise Exception("Wrong number of number of arguments")
# pre-map
pre_type = out_vars[0].ty
pre_code = _get_pre_code(in_vars, out_vars, operation)
# reduce
reduce_op = _get_reduce_op(reduce._raw, pre_type)
reduce_code = reduce_op[2][1]
reduce_type = numpy.dtype(reduce_op[1][0])
rtype = reduce_op[2][3]
post_type = "type_in0_raw" if rtype is None else rtype
pre_code += "typedef %s type_in0_raw;\n" % _dtype_to_ctype[reduce_type]
# post-map
post_in = [_FusionVar(0, reduce_type)]
mem = _FusionMem(post_in)
post_in_ref = [_FusionRef(_, mem) for _ in post_in]
post_out = _normalize_arg(post_map(*post_in_ref), mem)
if type(post_out) == tuple:
raise Exception("Can't reduce a tuple")
post_vars = mem.var_list
post_ops = mem.op_list
post_code = "".join(_get_declaration_from_var(_) for _ in post_vars[1:])
post_code += "".join(_get_declaration_from_op(_) for _ in post_ops)
post_code += "\n".join(_get_operation_code(_) for _ in post_ops)
post_code = _get_post_code(post_vars, post_code, post_out)
post_code += _get_fix_code(post_type, reduce_type, reduce_op[2][2])
submodules = _gather_submodules(op_list + post_ops)
submodule_code = "".join(_get_submodule_code(v) for v in submodules.values())
submodule_code += reduce._raw._preamble + pre_code + post_code
operation_args = ["v" + str(i) for i in six.moves.range(nin)]
operation = "_pre_map(" + ", ".join(operation_args) + ")"
out_params = "%s res" % post_out.ty
return core.ReductionKernel(
in_params,
out_params,
operation,
reduce_code,
"res = _post_map(_post_fix(a))",
identity,
reduce_type=post_type,
preamble=submodule_code,
)
|
def _get_fusion(func, nin, reduce, post_map, identity, input_types, name=None):
in_vars = [_FusionVar(i, t) for i, t in enumerate(input_types)]
mem = _FusionMem(in_vars)
in_refs = [_FusionRef(_, mem) for _ in in_vars]
out_refs = func(*in_refs)
out_refs = list(out_refs) if type(out_refs) == tuple else [out_refs]
out_refs = filter(lambda i: i is not None, out_refs)
out_refs = [_FusionRef(_normalize_arg(_, mem), mem) for _ in out_refs]
out_vars = [_normalize_arg(copy(_), mem) for _ in out_refs]
nout = len(out_vars)
op_list = mem.op_list
tmpvars = mem.var_list[nin:-nout] if nout > 0 else mem.var_list[nin:]
in_params = ", ".join(_get_params(in_vars))
out_params = ", ".join(_get_params(out_vars))
operation = "".join(_get_declaration_from_var(_) for _ in tmpvars)
operation += "".join(_get_declaration_from_op(_) for _ in op_list)
operation += "\n".join(_get_operation_code(_) for _ in op_list)
if name is None:
name = "fusion__" + "__".join(build_kernel_name(_) for _ in op_list)
if reduce is None:
if not out_params:
in_params = ", ".join(_get_params(in_vars[:-1]))
out_params = ", ".join(_get_params([in_vars[-1]]))
submodules = _gather_submodules(op_list)
submodule_code = "".join(_get_submodule_code(_) for _ in submodules.values())
return core.ElementwiseKernel(
in_params, out_params, operation, preamble=submodule_code, name=name
)
else:
if nout != 1:
raise Exception("Wrong number of number of arguments")
# pre-map
pre_type = out_vars[0].ty
pre_code = _get_pre_code(in_vars, out_vars, operation)
# reduce
reduce_op = _get_reduce_op(reduce._raw, pre_type)
reduce_code = reduce_op[2][1]
reduce_type = numpy.dtype(reduce_op[1][0])
rtype = reduce_op[2][3]
post_type = "type_in0_raw" if rtype is None else rtype
pre_code += "typedef %s type_in0_raw;\n" % _dtype_to_ctype[reduce_type]
# post-map
post_in = [_FusionVar(0, reduce_type)]
mem = _FusionMem(post_in)
post_in_ref = [_FusionRef(_, mem) for _ in post_in]
post_out = _normalize_arg(post_map(*post_in_ref), mem)
if type(post_out) == tuple:
raise Exception("Can't reduce a tuple")
post_vars = mem.var_list
post_ops = mem.op_list
post_code = "".join(_get_declaration_from_var(_) for _ in post_vars[1:])
post_code += "".join(_get_declaration_from_op(_) for _ in post_ops)
post_code += "\n".join(_get_operation_code(_) for _ in post_ops)
post_code = _get_post_code(post_vars, post_code, post_out)
post_code += _get_fix_code(post_type, reduce_type, reduce_op[2][2])
submodules = _gather_submodules(op_list + post_ops)
submodule_code = "".join(_get_submodule_code(v) for v in submodules.values())
submodule_code += reduce._raw._preamble + pre_code + post_code
operation_args = ["v" + str(i) for i in six.moves.range(nin)]
operation = "_pre_map(" + ", ".join(operation_args) + ")"
out_params = "%s res" % post_out.ty
return core.ReductionKernel(
in_params,
out_params,
operation,
reduce_code,
"res = _post_map(_post_fix(a))",
identity,
reduce_type=post_type,
preamble=submodule_code,
)
|
https://github.com/cupy/cupy/issues/209
|
Traceback (most recent call last):
File "test-out.py", line 13, in <module>
func(a, b, z)
File "/repos/cupy/cupy/core/fusion.py", line 602, in __call__
return self._call(*args, **kwargs)
File "/repos/cupy/cupy/core/fusion.py", line 628, in _call
self.post_map, self.identity, types)
File "/repos/cupy/cupy/core/fusion.py", line 499, in _get_fusion
out_refs = func(*in_refs)
File "test-out.py", line 11, in func
xp.add(a, b, out=z)
File "/repos/cupy/cupy/core/fusion.py", line 710, in __call__
return _convert(self._fusion_op)(*args, **kwargs)
File "/repos/cupy/cupy/core/fusion.py", line 339, in res
var_list.append(_normalize_arg.pop('out'))
AttributeError: 'function' object has no attribute 'pop'
|
AttributeError
|
def list_comments(prefix: str, *, is_endmarker: bool) -> List[ProtoComment]:
"""Return a list of :class:`ProtoComment` objects parsed from the given `prefix`."""
result: List[ProtoComment] = []
if not prefix or "#" not in prefix:
return result
consumed = 0
nlines = 0
ignored_lines = 0
for index, line in enumerate(re.split("\r?\n", prefix)):
consumed += len(line) + 1 # adding the length of the split '\n'
line = line.lstrip()
if not line:
nlines += 1
if not line.startswith("#"):
# Escaped newlines outside of a comment are not really newlines at
# all. We treat a single-line comment following an escaped newline
# as a simple trailing comment.
if line.endswith("\\"):
ignored_lines += 1
continue
if index == ignored_lines and not is_endmarker:
comment_type = token.COMMENT # simple trailing comment
else:
comment_type = STANDALONE_COMMENT
comment = make_comment(line)
result.append(
ProtoComment(
type=comment_type, value=comment, newlines=nlines, consumed=consumed
)
)
nlines = 0
return result
|
def list_comments(prefix: str, *, is_endmarker: bool) -> List[ProtoComment]:
"""Return a list of :class:`ProtoComment` objects parsed from the given `prefix`."""
result: List[ProtoComment] = []
if not prefix or "#" not in prefix:
return result
consumed = 0
nlines = 0
ignored_lines = 0
for index, line in enumerate(prefix.split("\n")):
consumed += len(line) + 1 # adding the length of the split '\n'
line = line.lstrip()
if not line:
nlines += 1
if not line.startswith("#"):
# Escaped newlines outside of a comment are not really newlines at
# all. We treat a single-line comment following an escaped newline
# as a simple trailing comment.
if line.endswith("\\"):
ignored_lines += 1
continue
if index == ignored_lines and not is_endmarker:
comment_type = token.COMMENT # simple trailing comment
else:
comment_type = STANDALONE_COMMENT
comment = make_comment(line)
result.append(
ProtoComment(
type=comment_type, value=comment, newlines=nlines, consumed=consumed
)
)
nlines = 0
return result
|
https://github.com/psf/black/issues/1913
|
fuzz run-test: commands[2] | coverage run fuzz.py
<string>:1: SyntaxWarning: "is" with a literal. Did you mean "=="?
<string>:1: SyntaxWarning: "is" with a literal. Did you mean "=="?
<string>:1: SyntaxWarning: "is" with a literal. Did you mean "=="?
<string>:1: SyntaxWarning: "is" with a literal. Did you mean "=="?
<string>:1: SyntaxWarning: "is" with a literal. Did you mean "=="?
<string>:1: SyntaxWarning: "is" with a literal. Did you mean "=="?
Highest target scores:
5 (label='(hypothesmith) number of unique ast node types')
9 (label='(hypothesmith) instructions in bytecode')
10 (label='(hypothesmith from_node) number of unique ast node types')
13 (label='(hypothesmith) total number of ast nodes')
35 (label='(hypothesmith from_node) total number of ast nodes')
48 (label='(hypothesmith from_node) instructions in bytecode')
Traceback (most recent call last):
File "<string>", line 7, in __init__
File "/home/cooper/repos/black/.tox/fuzz/lib/python3.9/site-packages/libcst/_nodes/base.py", line 115, in __post_init__
self._validate()
File "/home/cooper/repos/black/.tox/fuzz/lib/python3.9/site-packages/libcst/_nodes/expression.py", line 1148, in _validate
raise CSTValidationError(
File "<string>", line None
libcst._nodes.base.CSTValidationError: Must have at least one space around comparison operator.
|
libcst._nodes.base.CSTValidationError
|
def mark(self, leaf: Leaf) -> None:
"""Mark `leaf` with bracket-related metadata. Keep track of delimiters.
All leaves receive an int `bracket_depth` field that stores how deep
within brackets a given leaf is. 0 means there are no enclosing brackets
that started on this line.
If a leaf is itself a closing bracket, it receives an `opening_bracket`
field that it forms a pair with. This is a one-directional link to
avoid reference cycles.
If a leaf is a delimiter (a token on which Black can split the line if
needed) and it's on depth 0, its `id()` is stored in the tracker's
`delimiters` field.
"""
if leaf.type == token.COMMENT:
return
self.maybe_decrement_after_for_loop_variable(leaf)
self.maybe_decrement_after_lambda_arguments(leaf)
if leaf.type in CLOSING_BRACKETS:
self.depth -= 1
try:
opening_bracket = self.bracket_match.pop((self.depth, leaf.type))
except KeyError as e:
raise BracketMatchError(
"Unable to match a closing bracket to the following opening"
f" bracket: {leaf}"
) from e
leaf.opening_bracket = opening_bracket
if not leaf.value:
self.invisible.append(leaf)
leaf.bracket_depth = self.depth
if self.depth == 0:
delim = is_split_before_delimiter(leaf, self.previous)
if delim and self.previous is not None:
self.delimiters[id(self.previous)] = delim
else:
delim = is_split_after_delimiter(leaf, self.previous)
if delim:
self.delimiters[id(leaf)] = delim
if leaf.type in OPENING_BRACKETS:
self.bracket_match[self.depth, BRACKET[leaf.type]] = leaf
self.depth += 1
if not leaf.value:
self.invisible.append(leaf)
self.previous = leaf
self.maybe_increment_lambda_arguments(leaf)
self.maybe_increment_for_loop_variable(leaf)
|
def mark(self, leaf: Leaf) -> None:
"""Mark `leaf` with bracket-related metadata. Keep track of delimiters.
All leaves receive an int `bracket_depth` field that stores how deep
within brackets a given leaf is. 0 means there are no enclosing brackets
that started on this line.
If a leaf is itself a closing bracket, it receives an `opening_bracket`
field that it forms a pair with. This is a one-directional link to
avoid reference cycles.
If a leaf is a delimiter (a token on which Black can split the line if
needed) and it's on depth 0, its `id()` is stored in the tracker's
`delimiters` field.
"""
if leaf.type == token.COMMENT:
return
self.maybe_decrement_after_for_loop_variable(leaf)
self.maybe_decrement_after_lambda_arguments(leaf)
if leaf.type in CLOSING_BRACKETS:
self.depth -= 1
opening_bracket = self.bracket_match.pop((self.depth, leaf.type))
leaf.opening_bracket = opening_bracket
if not leaf.value:
self.invisible.append(leaf)
leaf.bracket_depth = self.depth
if self.depth == 0:
delim = is_split_before_delimiter(leaf, self.previous)
if delim and self.previous is not None:
self.delimiters[id(self.previous)] = delim
else:
delim = is_split_after_delimiter(leaf, self.previous)
if delim:
self.delimiters[id(leaf)] = delim
if leaf.type in OPENING_BRACKETS:
self.bracket_match[self.depth, BRACKET[leaf.type]] = leaf
self.depth += 1
if not leaf.value:
self.invisible.append(leaf)
self.previous = leaf
self.maybe_increment_lambda_arguments(leaf)
self.maybe_increment_for_loop_variable(leaf)
|
https://github.com/psf/black/issues/1597
|
$ cat author.py
class xxxxxxxxxxxxxxxxxxxxx(xxxx.xxxxxxxxxxxxx):
def xxxxxxx_xxxxxx(xxxx):
assert xxxxxxx_xxxx in [
x.xxxxx.xxxxxx.xxxxx.xxxxxx,
x.xxxxx.xxxxxx.xxxxx.xxxx,
], ("xxxxxxxxxxx xxxxxxx xxxx (xxxxxx xxxx) %x xxx xxxxx" % xxxxxxx_xxxx)
$ black -v author.py
Using configuration from /Users/jelle/py/black/pyproject.toml.
Traceback (most recent call last):
File "/Users/jelle/py/black/src/black/__init__.py", line 648, in reformat_one
if changed is not Changed.CACHED and format_file_in_place(
File "/Users/jelle/py/black/src/black/__init__.py", line 791, in format_file_in_place
dst_contents = format_file_contents(src_contents, fast=fast, mode=mode)
File "/Users/jelle/py/black/src/black/__init__.py", line 912, in format_file_contents
dst_contents = format_str(src_contents, mode=mode)
File "/Users/jelle/py/black/src/black/__init__.py", line 977, in format_str
for line in transform_line(
File "/Users/jelle/py/black/src/black/__init__.py", line 2731, in transform_line
result.extend(
File "/Users/jelle/py/black/src/black/__init__.py", line 2725, in transform_line
for transformed_line in transform(line, features):
File "/Users/jelle/py/black/src/black/__init__.py", line 2834, in __call__
for line_result in self.do_transform(line, string_idx):
File "/Users/jelle/py/black/src/black/__init__.py", line 3315, in do_transform
append_leaves(new_line, line, LL[: string_idx - 1])
File "/Users/jelle/py/black/src/black/__init__.py", line 4601, in append_leaves
new_line.append(new_leaf)
File "/Users/jelle/py/black/src/black/__init__.py", line 1425, in append
self.bracket_tracker.mark(leaf)
File "/Users/jelle/py/black/src/black/__init__.py", line 1291, in mark
opening_bracket = self.bracket_match.pop((self.depth, leaf.type))
KeyError: (-1, 10)
error: cannot format author.py: (-1, 10)
Oh no! 💥 💔 💥
1 file failed to reformat.
|
KeyError
|
def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
LL = line.leaves
string_parser = StringParser()
rpar_idx = string_parser.parse(LL, string_idx)
for leaf in (LL[string_idx - 1], LL[rpar_idx]):
if line.comments_after(leaf):
yield TErr(
"Will not strip parentheses which have comments attached to them."
)
return
new_line = line.clone()
new_line.comments = line.comments.copy()
try:
append_leaves(new_line, line, LL[: string_idx - 1])
except BracketMatchError:
# HACK: I believe there is currently a bug somewhere in
# right_hand_split() that is causing brackets to not be tracked
# properly by a shared BracketTracker.
append_leaves(new_line, line, LL[: string_idx - 1], preformatted=True)
string_leaf = Leaf(token.STRING, LL[string_idx].value)
LL[string_idx - 1].remove()
replace_child(LL[string_idx], string_leaf)
new_line.append(string_leaf)
append_leaves(new_line, line, LL[string_idx + 1 : rpar_idx] + LL[rpar_idx + 1 :])
LL[rpar_idx].remove()
yield Ok(new_line)
|
def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
LL = line.leaves
string_parser = StringParser()
rpar_idx = string_parser.parse(LL, string_idx)
for leaf in (LL[string_idx - 1], LL[rpar_idx]):
if line.comments_after(leaf):
yield TErr(
"Will not strip parentheses which have comments attached to them."
)
new_line = line.clone()
new_line.comments = line.comments.copy()
append_leaves(new_line, line, LL[: string_idx - 1])
string_leaf = Leaf(token.STRING, LL[string_idx].value)
LL[string_idx - 1].remove()
replace_child(LL[string_idx], string_leaf)
new_line.append(string_leaf)
append_leaves(new_line, line, LL[string_idx + 1 : rpar_idx] + LL[rpar_idx + 1 :])
LL[rpar_idx].remove()
yield Ok(new_line)
|
https://github.com/psf/black/issues/1597
|
$ cat author.py
class xxxxxxxxxxxxxxxxxxxxx(xxxx.xxxxxxxxxxxxx):
def xxxxxxx_xxxxxx(xxxx):
assert xxxxxxx_xxxx in [
x.xxxxx.xxxxxx.xxxxx.xxxxxx,
x.xxxxx.xxxxxx.xxxxx.xxxx,
], ("xxxxxxxxxxx xxxxxxx xxxx (xxxxxx xxxx) %x xxx xxxxx" % xxxxxxx_xxxx)
$ black -v author.py
Using configuration from /Users/jelle/py/black/pyproject.toml.
Traceback (most recent call last):
File "/Users/jelle/py/black/src/black/__init__.py", line 648, in reformat_one
if changed is not Changed.CACHED and format_file_in_place(
File "/Users/jelle/py/black/src/black/__init__.py", line 791, in format_file_in_place
dst_contents = format_file_contents(src_contents, fast=fast, mode=mode)
File "/Users/jelle/py/black/src/black/__init__.py", line 912, in format_file_contents
dst_contents = format_str(src_contents, mode=mode)
File "/Users/jelle/py/black/src/black/__init__.py", line 977, in format_str
for line in transform_line(
File "/Users/jelle/py/black/src/black/__init__.py", line 2731, in transform_line
result.extend(
File "/Users/jelle/py/black/src/black/__init__.py", line 2725, in transform_line
for transformed_line in transform(line, features):
File "/Users/jelle/py/black/src/black/__init__.py", line 2834, in __call__
for line_result in self.do_transform(line, string_idx):
File "/Users/jelle/py/black/src/black/__init__.py", line 3315, in do_transform
append_leaves(new_line, line, LL[: string_idx - 1])
File "/Users/jelle/py/black/src/black/__init__.py", line 4601, in append_leaves
new_line.append(new_leaf)
File "/Users/jelle/py/black/src/black/__init__.py", line 1425, in append
self.bracket_tracker.mark(leaf)
File "/Users/jelle/py/black/src/black/__init__.py", line 1291, in mark
opening_bracket = self.bracket_match.pop((self.depth, leaf.type))
KeyError: (-1, 10)
error: cannot format author.py: (-1, 10)
Oh no! 💥 💔 💥
1 file failed to reformat.
|
KeyError
|
def append_leaves(
new_line: Line, old_line: Line, leaves: List[Leaf], preformatted: bool = False
) -> None:
"""
Append leaves (taken from @old_line) to @new_line, making sure to fix the
underlying Node structure where appropriate.
All of the leaves in @leaves are duplicated. The duplicates are then
appended to @new_line and used to replace their originals in the underlying
Node structure. Any comments attached to the old leaves are reattached to
the new leaves.
Pre-conditions:
set(@leaves) is a subset of set(@old_line.leaves).
"""
for old_leaf in leaves:
new_leaf = Leaf(old_leaf.type, old_leaf.value)
replace_child(old_leaf, new_leaf)
new_line.append(new_leaf, preformatted=preformatted)
for comment_leaf in old_line.comments_after(old_leaf):
new_line.append(comment_leaf, preformatted=True)
|
def append_leaves(new_line: Line, old_line: Line, leaves: List[Leaf]) -> None:
"""
Append leaves (taken from @old_line) to @new_line, making sure to fix the
underlying Node structure where appropriate.
All of the leaves in @leaves are duplicated. The duplicates are then
appended to @new_line and used to replace their originals in the underlying
Node structure. Any comments attached to the old leaves are reattached to
the new leaves.
Pre-conditions:
set(@leaves) is a subset of set(@old_line.leaves).
"""
for old_leaf in leaves:
new_leaf = Leaf(old_leaf.type, old_leaf.value)
replace_child(old_leaf, new_leaf)
new_line.append(new_leaf)
for comment_leaf in old_line.comments_after(old_leaf):
new_line.append(comment_leaf, preformatted=True)
|
https://github.com/psf/black/issues/1597
|
$ cat author.py
class xxxxxxxxxxxxxxxxxxxxx(xxxx.xxxxxxxxxxxxx):
def xxxxxxx_xxxxxx(xxxx):
assert xxxxxxx_xxxx in [
x.xxxxx.xxxxxx.xxxxx.xxxxxx,
x.xxxxx.xxxxxx.xxxxx.xxxx,
], ("xxxxxxxxxxx xxxxxxx xxxx (xxxxxx xxxx) %x xxx xxxxx" % xxxxxxx_xxxx)
$ black -v author.py
Using configuration from /Users/jelle/py/black/pyproject.toml.
Traceback (most recent call last):
File "/Users/jelle/py/black/src/black/__init__.py", line 648, in reformat_one
if changed is not Changed.CACHED and format_file_in_place(
File "/Users/jelle/py/black/src/black/__init__.py", line 791, in format_file_in_place
dst_contents = format_file_contents(src_contents, fast=fast, mode=mode)
File "/Users/jelle/py/black/src/black/__init__.py", line 912, in format_file_contents
dst_contents = format_str(src_contents, mode=mode)
File "/Users/jelle/py/black/src/black/__init__.py", line 977, in format_str
for line in transform_line(
File "/Users/jelle/py/black/src/black/__init__.py", line 2731, in transform_line
result.extend(
File "/Users/jelle/py/black/src/black/__init__.py", line 2725, in transform_line
for transformed_line in transform(line, features):
File "/Users/jelle/py/black/src/black/__init__.py", line 2834, in __call__
for line_result in self.do_transform(line, string_idx):
File "/Users/jelle/py/black/src/black/__init__.py", line 3315, in do_transform
append_leaves(new_line, line, LL[: string_idx - 1])
File "/Users/jelle/py/black/src/black/__init__.py", line 4601, in append_leaves
new_line.append(new_leaf)
File "/Users/jelle/py/black/src/black/__init__.py", line 1425, in append
self.bracket_tracker.mark(leaf)
File "/Users/jelle/py/black/src/black/__init__.py", line 1291, in mark
opening_bracket = self.bracket_match.pop((self.depth, leaf.type))
KeyError: (-1, 10)
error: cannot format author.py: (-1, 10)
Oh no! 💥 💔 💥
1 file failed to reformat.
|
KeyError
|
def __validate_msg(line: Line, string_idx: int) -> TResult[None]:
"""Validate (M)erge (S)tring (G)roup
Transform-time string validation logic for __merge_string_group(...).
Returns:
* Ok(None), if ALL validation checks (listed below) pass.
OR
* Err(CannotTransform), if any of the following are true:
- The target string group does not contain ANY stand-alone comments.
- The target string is not in a string group (i.e. it has no
adjacent strings).
- The string group has more than one inline comment.
- The string group has an inline comment that appears to be a pragma.
- The set of all string prefixes in the string group is of
length greater than one and is not equal to {"", "f"}.
- The string group consists of raw strings.
"""
# We first check for "inner" stand-alone comments (i.e. stand-alone
# comments that have a string leaf before them AND after them).
for inc in [1, -1]:
i = string_idx
found_sa_comment = False
is_valid_index = is_valid_index_factory(line.leaves)
while is_valid_index(i) and line.leaves[i].type in [
token.STRING,
STANDALONE_COMMENT,
]:
if line.leaves[i].type == STANDALONE_COMMENT:
found_sa_comment = True
elif found_sa_comment:
return TErr(
"StringMerger does NOT merge string groups which contain "
"stand-alone comments."
)
i += inc
num_of_inline_string_comments = 0
set_of_prefixes = set()
num_of_strings = 0
for leaf in line.leaves[string_idx:]:
if leaf.type != token.STRING:
# If the string group is trailed by a comma, we count the
# comments trailing the comma to be one of the string group's
# comments.
if leaf.type == token.COMMA and id(leaf) in line.comments:
num_of_inline_string_comments += 1
break
if has_triple_quotes(leaf.value):
return TErr("StringMerger does NOT merge multiline strings.")
num_of_strings += 1
prefix = get_string_prefix(leaf.value)
if "r" in prefix:
return TErr("StringMerger does NOT merge raw strings.")
set_of_prefixes.add(prefix)
if id(leaf) in line.comments:
num_of_inline_string_comments += 1
if contains_pragma_comment(line.comments[id(leaf)]):
return TErr("Cannot merge strings which have pragma comments.")
if num_of_strings < 2:
return TErr(f"Not enough strings to merge (num_of_strings={num_of_strings}).")
if num_of_inline_string_comments > 1:
return TErr(
f"Too many inline string comments ({num_of_inline_string_comments})."
)
if len(set_of_prefixes) > 1 and set_of_prefixes != {"", "f"}:
return TErr(f"Too many different prefixes ({set_of_prefixes}).")
return Ok(None)
|
def __validate_msg(line: Line, string_idx: int) -> TResult[None]:
"""Validate (M)erge (S)tring (G)roup
Transform-time string validation logic for __merge_string_group(...).
Returns:
* Ok(None), if ALL validation checks (listed below) pass.
OR
* Err(CannotTransform), if any of the following are true:
- The target string is not in a string group (i.e. it has no
adjacent strings).
- The string group has more than one inline comment.
- The string group has an inline comment that appears to be a pragma.
- The set of all string prefixes in the string group is of
length greater than one and is not equal to {"", "f"}.
- The string group consists of raw strings.
"""
num_of_inline_string_comments = 0
set_of_prefixes = set()
num_of_strings = 0
for leaf in line.leaves[string_idx:]:
if leaf.type != token.STRING:
# If the string group is trailed by a comma, we count the
# comments trailing the comma to be one of the string group's
# comments.
if leaf.type == token.COMMA and id(leaf) in line.comments:
num_of_inline_string_comments += 1
break
if has_triple_quotes(leaf.value):
return TErr("StringMerger does NOT merge multiline strings.")
num_of_strings += 1
prefix = get_string_prefix(leaf.value)
if "r" in prefix:
return TErr("StringMerger does NOT merge raw strings.")
set_of_prefixes.add(prefix)
if id(leaf) in line.comments:
num_of_inline_string_comments += 1
if contains_pragma_comment(line.comments[id(leaf)]):
return TErr("Cannot merge strings which have pragma comments.")
if num_of_strings < 2:
return TErr(f"Not enough strings to merge (num_of_strings={num_of_strings}).")
if num_of_inline_string_comments > 1:
return TErr(
f"Too many inline string comments ({num_of_inline_string_comments})."
)
if len(set_of_prefixes) > 1 and set_of_prefixes != {"", "f"}:
return TErr(f"Too many different prefixes ({set_of_prefixes}).")
return Ok(None)
|
https://github.com/psf/black/issues/1596
|
$ cat xxx.py
xxxxxxx_xxxxxx_xxxxxxx = xxx(
[
xxxxxxxxxxxx(
xxxxxx_xxxxxxx=(
'((x.xxxxxxxxx = "xxxxxx.xxxxxxxxxxxxxxxxxxxxx") || (x.xxxxxxxxx = "xxxxxxxxxxxx")) && '
# xxxxx xxxxxxxxxxxx xxxx xxx (xxxxxxxxxxxxxxxx) xx x xxxxxxxxx xx xxxxxx.
"(x.xxxxxxxxxxxx.xxx != "
'"xxx:xxx:xxx::xxxxxxxxxxxx:xxxxxxx-xxxx/xxxxxxxxxxx/xxxxxxxxxxxxxxxxx") && '
)
)
]
)
$ black -v xxx.py
Using configuration from /Users/jelle/py/black/pyproject.toml.
Traceback (most recent call last):
File "/Users/jelle/py/black/src/black/__init__.py", line 648, in reformat_one
if changed is not Changed.CACHED and format_file_in_place(
File "/Users/jelle/py/black/src/black/__init__.py", line 791, in format_file_in_place
dst_contents = format_file_contents(src_contents, fast=fast, mode=mode)
File "/Users/jelle/py/black/src/black/__init__.py", line 918, in format_file_contents
assert_stable(src_contents, dst_contents, mode=mode)
File "/Users/jelle/py/black/src/black/__init__.py", line 6076, in assert_stable
newdst = format_str(dst, mode=mode)
File "/Users/jelle/py/black/src/black/__init__.py", line 977, in format_str
for line in transform_line(
File "/Users/jelle/py/black/src/black/__init__.py", line 2731, in transform_line
result.extend(
File "/Users/jelle/py/black/src/black/__init__.py", line 2725, in transform_line
for transformed_line in transform(line, features):
File "/Users/jelle/py/black/src/black/__init__.py", line 2834, in __call__
for line_result in self.do_transform(line, string_idx):
File "/Users/jelle/py/black/src/black/__init__.py", line 2978, in do_transform
msg_result = self.__merge_string_group(new_line, string_idx)
File "/Users/jelle/py/black/src/black/__init__.py", line 3164, in __merge_string_group
new_line.append(string_leaf)
File "/Users/jelle/py/black/src/black/__init__.py", line 1421, in append
leaf.prefix += whitespace(
File "/Users/jelle/py/black/src/black/__init__.py", line 2152, in whitespace
assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}"
AssertionError: INTERNAL ERROR: hand-made leaf without parent: Leaf(STRING, '\'(x.xxxxxxxxxxxx.xxx != "xxx:xxx:xxx::xxxxxxxxxxxx:xxxxxxx-xxxx/xxxxxxxxxxx/xxxxxxxxxxxxxxxxx") && \'')
error: cannot format xxx.py: INTERNAL ERROR: hand-made leaf without parent: Leaf(STRING, '\'(x.xxxxxxxxxxxx.xxx != "xxx:xxx:xxx::xxxxxxxxxxxx:xxxxxxx-xxxx/xxxxxxxxxxx/xxxxxxxxxxxxxxxxx") && \'')
Oh no! 💥 💔 💥
1 file failed to reformat.
|
AssertionError
|
def normalize_path_maybe_ignore(
path: Path, root: Path, report: "Report"
) -> Optional[str]:
"""Normalize `path`. May return `None` if `path` was ignored.
`report` is where "path ignored" output goes.
"""
try:
abspath = path if path.is_absolute() else Path.cwd() / path
normalized_path = abspath.resolve().relative_to(root).as_posix()
except OSError as e:
report.path_ignored(path, f"cannot be read because {e}")
return None
except ValueError:
if path.is_symlink():
report.path_ignored(path, f"is a symbolic link that points outside {root}")
return None
raise
return normalized_path
|
def normalize_path_maybe_ignore(
path: Path, root: Path, report: "Report"
) -> Optional[str]:
"""Normalize `path`. May return `None` if `path` was ignored.
`report` is where "path ignored" output goes.
"""
try:
normalized_path = path.resolve().relative_to(root).as_posix()
except OSError as e:
report.path_ignored(path, f"cannot be read because {e}")
return None
except ValueError:
if path.is_symlink():
report.path_ignored(path, f"is a symbolic link that points outside {root}")
return None
raise
return normalized_path
|
https://github.com/psf/black/issues/1631
|
root@2d592a60ac50:/# /opt/conda/envs/lib3to6_py38/bin/black src/
Traceback (most recent call last):
File "/opt/conda/envs/lib3to6_py38/bin/black", line 8, in <module>
sys.exit(patched_main())
File "/opt/conda/envs/lib3to6_py38/lib/python3.8/site-packages/black/__init__.py", line 6607, in patched_main
main()
File "/opt/conda/envs/lib3to6_py38/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/opt/conda/envs/lib3to6_py38/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/opt/conda/envs/lib3to6_py38/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/conda/envs/lib3to6_py38/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/opt/conda/envs/lib3to6_py38/lib/python3.8/site-packages/click/decorators.py", line 21, in new_func
return f(get_current_context(), *args, **kwargs)
File "/opt/conda/envs/lib3to6_py38/lib/python3.8/site-packages/black/__init__.py", line 528, in main
sources = get_sources(
File "/opt/conda/envs/lib3to6_py38/lib/python3.8/site-packages/black/__init__.py", line 604, in get_sources
sources.update(
File "/opt/conda/envs/lib3to6_py38/lib/python3.8/site-packages/black/__init__.py", line 5868, in gen_python_files
normalized_path = normalize_path_maybe_ignore(child, root, report)
File "/opt/conda/envs/lib3to6_py38/lib/python3.8/site-packages/black/__init__.py", line 5835, in normalize_path_maybe_ignore
normalized_path = path.resolve().relative_to(root).as_posix()
File "/opt/conda/envs/lib3to6_py38/lib/python3.8/pathlib.py", line 904, in relative_to
raise ValueError("{!r} does not start with {!r}"
ValueError: '//src/lib3to6' does not start with '/'
|
ValueError
|
def parse_pyproject_toml(path_config: str) -> Dict[str, Any]:
"""Parse a pyproject toml file, pulling out relevant parts for Black
If parsing fails, will raise a toml.TomlDecodeError
"""
pyproject_toml = toml.load(path_config)
config = pyproject_toml.get("tool", {}).get("black", {})
return {k.replace("--", "").replace("-", "_"): v for k, v in config.items()}
|
def parse_pyproject_toml(path_config: str) -> Dict[str, Any]:
"""Parse a pyproject toml file, pulling out relevant parts for Black
If parsing fails, will raise a toml.TomlDecodeError
"""
pyproject_toml = toml.load(path_config)
config = pyproject_toml.get("tool", {}).get("black", {})
return {
k.replace("--", "").replace("-", "_"): str(v)
if not isinstance(v, (list, dict))
else v
for k, v in config.items()
}
|
https://github.com/psf/black/issues/1496
|
$ python -m unittest
E....[2020-06-14 14:22:15,341] DEBUG: Using selector: EpollSelector (selector_events.py:59)
.[2020-06-14 14:22:15,342] DEBUG: Using selector: EpollSelector (selector_events.py:59)
.[2020-06-14 14:22:15,680] DEBUG: Using selector: EpollSelector (selector_events.py:59)
.[2020-06-14 14:22:15,682] DEBUG: Using selector: EpollSelector (selector_events.py:59)
[2020-06-14 14:22:15,683] INFO: 16 projects to run Black over (lib.py:311)
[2020-06-14 14:22:15,683] DEBUG: Using 2 parallel workers to run Black (lib.py:316)
[2020-06-14 14:22:15,684] DEBUG: worker 0 working on aioexabgp (lib.py:247)
[2020-06-14 14:22:15,684] DEBUG: worker 0 working on attrs (lib.py:247)
[2020-06-14 14:22:15,684] DEBUG: worker 0 working on bandersnatch (lib.py:247)
[2020-06-14 14:22:15,684] DEBUG: worker 0 working on channels (lib.py:247)
[2020-06-14 14:22:15,684] DEBUG: worker 0 working on django (lib.py:247)
[2020-06-14 14:22:15,684] INFO: Skipping django as it's disabled via config (lib.py:254)
[2020-06-14 14:22:15,684] DEBUG: worker 0 working on flake8-bugbear (lib.py:247)
[2020-06-14 14:22:15,684] DEBUG: worker 0 working on hypothesis (lib.py:247)
[2020-06-14 14:22:15,684] DEBUG: worker 0 working on pandas (lib.py:247)
[2020-06-14 14:22:15,684] INFO: Skipping pandas as it's disabled via config (lib.py:254)
[2020-06-14 14:22:15,684] DEBUG: worker 0 working on poetry (lib.py:247)
[2020-06-14 14:22:15,684] DEBUG: worker 0 working on ptr (lib.py:247)
[2020-06-14 14:22:15,684] DEBUG: worker 0 working on pyramid (lib.py:247)
[2020-06-14 14:22:15,684] DEBUG: worker 0 working on pytest (lib.py:247)
[2020-06-14 14:22:15,684] INFO: Skipping pytest as it's disabled via config (lib.py:254)
[2020-06-14 14:22:15,684] DEBUG: worker 0 working on sqlalchemy (lib.py:247)
[2020-06-14 14:22:15,685] DEBUG: worker 0 working on tox (lib.py:247)
[2020-06-14 14:22:15,685] DEBUG: worker 0 working on virtualenv (lib.py:247)
[2020-06-14 14:22:15,685] DEBUG: worker 0 working on warehouse (lib.py:247)
[2020-06-14 14:22:15,685] DEBUG: project_runner 0 exiting (lib.py:245)
[2020-06-14 14:22:15,685] DEBUG: project_runner 1 exiting (lib.py:245)
[2020-06-14 14:22:15,685] INFO: Analyzing results (lib.py:327)
.
======================================================================
ERROR: tests.test_black (unittest.loader._FailedTest)
----------------------------------------------------------------------
ImportError: Failed to import test module: tests.test_black
Traceback (most recent call last):
File "/home/username/miniconda3/envs/ml4sts/lib/python3.8/unittest/loader.py", line 436, in _find_test_path
module = self._get_module_from_name(name)
File "/home/username/miniconda3/envs/ml4sts/lib/python3.8/unittest/loader.py", line 377, in _get_module_from_name
__import__(name)
File "/home/erik/black/tests/test_black.py", line 1766, in <module>
class BlackDTestCase(AioHTTPTestCase):
NameError: name 'AioHTTPTestCase' is not defined
----------------------------------------------------------------------
Ran 9 tests in 0.348s
FAILED (errors=1)
|
ImportError
|
def read_pyproject_toml(
ctx: click.Context, param: click.Parameter, value: Optional[str]
) -> Optional[str]:
"""Inject Black configuration from "pyproject.toml" into defaults in `ctx`.
Returns the path to a successfully found and read configuration file, None
otherwise.
"""
if not value:
value = find_pyproject_toml(ctx.params.get("src", ()))
if value is None:
return None
try:
config = parse_pyproject_toml(value)
except (toml.TomlDecodeError, OSError) as e:
raise click.FileError(
filename=value, hint=f"Error reading configuration file: {e}"
)
if not config:
return None
else:
# Sanitize the values to be Click friendly. For more information please see:
# https://github.com/psf/black/issues/1458
# https://github.com/pallets/click/issues/1567
config = {
k: str(v) if not isinstance(v, (list, dict)) else v
for k, v in config.items()
}
target_version = config.get("target_version")
if target_version is not None and not isinstance(target_version, list):
raise click.BadOptionUsage(
"target-version", "Config key target-version must be a list"
)
default_map: Dict[str, Any] = {}
if ctx.default_map:
default_map.update(ctx.default_map)
default_map.update(config)
ctx.default_map = default_map
return value
|
def read_pyproject_toml(
ctx: click.Context, param: click.Parameter, value: Optional[str]
) -> Optional[str]:
"""Inject Black configuration from "pyproject.toml" into defaults in `ctx`.
Returns the path to a successfully found and read configuration file, None
otherwise.
"""
if not value:
value = find_pyproject_toml(ctx.params.get("src", ()))
if value is None:
return None
try:
config = parse_pyproject_toml(value)
except (toml.TomlDecodeError, OSError) as e:
raise click.FileError(
filename=value, hint=f"Error reading configuration file: {e}"
)
if not config:
return None
target_version = config.get("target_version")
if target_version is not None and not isinstance(target_version, list):
raise click.BadOptionUsage(
"target-version", "Config key target-version must be a list"
)
default_map: Dict[str, Any] = {}
if ctx.default_map:
default_map.update(ctx.default_map)
default_map.update(config)
ctx.default_map = default_map
return value
|
https://github.com/psf/black/issues/1496
|
$ python -m unittest
E....[2020-06-14 14:22:15,341] DEBUG: Using selector: EpollSelector (selector_events.py:59)
.[2020-06-14 14:22:15,342] DEBUG: Using selector: EpollSelector (selector_events.py:59)
.[2020-06-14 14:22:15,680] DEBUG: Using selector: EpollSelector (selector_events.py:59)
.[2020-06-14 14:22:15,682] DEBUG: Using selector: EpollSelector (selector_events.py:59)
[2020-06-14 14:22:15,683] INFO: 16 projects to run Black over (lib.py:311)
[2020-06-14 14:22:15,683] DEBUG: Using 2 parallel workers to run Black (lib.py:316)
[2020-06-14 14:22:15,684] DEBUG: worker 0 working on aioexabgp (lib.py:247)
[2020-06-14 14:22:15,684] DEBUG: worker 0 working on attrs (lib.py:247)
[2020-06-14 14:22:15,684] DEBUG: worker 0 working on bandersnatch (lib.py:247)
[2020-06-14 14:22:15,684] DEBUG: worker 0 working on channels (lib.py:247)
[2020-06-14 14:22:15,684] DEBUG: worker 0 working on django (lib.py:247)
[2020-06-14 14:22:15,684] INFO: Skipping django as it's disabled via config (lib.py:254)
[2020-06-14 14:22:15,684] DEBUG: worker 0 working on flake8-bugbear (lib.py:247)
[2020-06-14 14:22:15,684] DEBUG: worker 0 working on hypothesis (lib.py:247)
[2020-06-14 14:22:15,684] DEBUG: worker 0 working on pandas (lib.py:247)
[2020-06-14 14:22:15,684] INFO: Skipping pandas as it's disabled via config (lib.py:254)
[2020-06-14 14:22:15,684] DEBUG: worker 0 working on poetry (lib.py:247)
[2020-06-14 14:22:15,684] DEBUG: worker 0 working on ptr (lib.py:247)
[2020-06-14 14:22:15,684] DEBUG: worker 0 working on pyramid (lib.py:247)
[2020-06-14 14:22:15,684] DEBUG: worker 0 working on pytest (lib.py:247)
[2020-06-14 14:22:15,684] INFO: Skipping pytest as it's disabled via config (lib.py:254)
[2020-06-14 14:22:15,684] DEBUG: worker 0 working on sqlalchemy (lib.py:247)
[2020-06-14 14:22:15,685] DEBUG: worker 0 working on tox (lib.py:247)
[2020-06-14 14:22:15,685] DEBUG: worker 0 working on virtualenv (lib.py:247)
[2020-06-14 14:22:15,685] DEBUG: worker 0 working on warehouse (lib.py:247)
[2020-06-14 14:22:15,685] DEBUG: project_runner 0 exiting (lib.py:245)
[2020-06-14 14:22:15,685] DEBUG: project_runner 1 exiting (lib.py:245)
[2020-06-14 14:22:15,685] INFO: Analyzing results (lib.py:327)
.
======================================================================
ERROR: tests.test_black (unittest.loader._FailedTest)
----------------------------------------------------------------------
ImportError: Failed to import test module: tests.test_black
Traceback (most recent call last):
File "/home/username/miniconda3/envs/ml4sts/lib/python3.8/unittest/loader.py", line 436, in _find_test_path
module = self._get_module_from_name(name)
File "/home/username/miniconda3/envs/ml4sts/lib/python3.8/unittest/loader.py", line 377, in _get_module_from_name
__import__(name)
File "/home/erik/black/tests/test_black.py", line 1766, in <module>
class BlackDTestCase(AioHTTPTestCase):
NameError: name 'AioHTTPTestCase' is not defined
----------------------------------------------------------------------
Ran 9 tests in 0.348s
FAILED (errors=1)
|
ImportError
|
def reformat_many(
sources: Set[Path], fast: bool, write_back: WriteBack, mode: Mode, report: "Report"
) -> None:
"""Reformat multiple files using a ProcessPoolExecutor."""
loop = asyncio.get_event_loop()
worker_count = os.cpu_count()
if sys.platform == "win32":
# Work around https://bugs.python.org/issue26903
worker_count = min(worker_count, 61)
try:
executor = ProcessPoolExecutor(max_workers=worker_count)
except OSError:
# we arrive here if the underlying system does not support multi-processing
# like in AWS Lambda, in which case we gracefully fallback to the default
# mono-process Executor by using None
executor = None
try:
loop.run_until_complete(
schedule_formatting(
sources=sources,
fast=fast,
write_back=write_back,
mode=mode,
report=report,
loop=loop,
executor=executor,
)
)
finally:
shutdown(loop)
if executor is not None:
executor.shutdown()
|
def reformat_many(
sources: Set[Path], fast: bool, write_back: WriteBack, mode: Mode, report: "Report"
) -> None:
"""Reformat multiple files using a ProcessPoolExecutor."""
loop = asyncio.get_event_loop()
worker_count = os.cpu_count()
if sys.platform == "win32":
# Work around https://bugs.python.org/issue26903
worker_count = min(worker_count, 61)
executor = ProcessPoolExecutor(max_workers=worker_count)
try:
loop.run_until_complete(
schedule_formatting(
sources=sources,
fast=fast,
write_back=write_back,
mode=mode,
report=report,
loop=loop,
executor=executor,
)
)
finally:
shutdown(loop)
executor.shutdown()
|
https://github.com/psf/black/issues/776
|
|Traceback (most recent call last):
| File "/usr/local/lib/XXX/virtualenv/bin/black", line 11, in <module>
| sys.exit(patched_main())
| File "/usr/local/lib/XXX/virtualenv/lib/python3.6/site-packages/black.py", line 3754, in patched_main
| main()
| File "/usr/local/lib/XXX/virtualenv/lib/python3.6/site-packages/click/core.py", line 764, in __call__
| return self.main(*args, **kwargs)
| File "/usr/local/lib/XXX/virtualenv/lib/python3.6/site-packages/click/core.py", line 717, in main
| rv = self.invoke(ctx)
| File "/usr/local/lib/XXX/virtualenv/lib/python3.6/site-packages/click/core.py", line 956, in invoke
| return ctx.invoke(self.callback, **ctx.params)
| File "/usr/local/lib/XXX/virtualenv/lib/python3.6/site-packages/click/core.py", line 555, in invoke
| return callback(*args, **kwargs)
| File "/usr/local/lib/XXX/virtualenv/lib/python3.6/site-packages/click/decorators.py", line 17, in new_func
| return f(get_current_context(), *args, **kwargs)
| File "/usr/local/lib/XXX/virtualenv/lib/python3.6/site-packages/black.py", line 435, in main
| executor = ProcessPoolExecutor(max_workers=os.cpu_count())
| File "/usr/lib/python3.6/concurrent/futures/process.py", line 402, in __init__
| EXTRA_QUEUED_CALLS)
| File "/usr/lib/python3.6/multiprocessing/context.py", line 102, in Queue
| return Queue(maxsize, ctx=self.get_context())
| File "/usr/lib/python3.6/multiprocessing/queues.py", line 42, in __init__
| self._rlock = ctx.Lock()
| File "/usr/lib/python3.6/multiprocessing/context.py", line 67, in Lock
| return Lock(ctx=self.get_context())
| File "/usr/lib/python3.6/multiprocessing/synchronize.py", line 162, in __init__
| SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx)
| File "/usr/lib/python3.6/multiprocessing/synchronize.py", line 59, in __init__
| unlink_now)
|OSError: [Errno 30] Read-only file system
|
OSError
|
async def schedule_formatting(
sources: Set[Path],
fast: bool,
write_back: WriteBack,
mode: Mode,
report: "Report",
loop: asyncio.AbstractEventLoop,
executor: Optional[Executor],
) -> None:
"""Run formatting of `sources` in parallel using the provided `executor`.
(Use ProcessPoolExecutors for actual parallelism.)
`write_back`, `fast`, and `mode` options are passed to
:func:`format_file_in_place`.
"""
cache: Cache = {}
if write_back != WriteBack.DIFF:
cache = read_cache(mode)
sources, cached = filter_cached(cache, sources)
for src in sorted(cached):
report.done(src, Changed.CACHED)
if not sources:
return
cancelled = []
sources_to_cache = []
lock = None
if write_back == WriteBack.DIFF:
# For diff output, we need locks to ensure we don't interleave output
# from different processes.
manager = Manager()
lock = manager.Lock()
tasks = {
asyncio.ensure_future(
loop.run_in_executor(
executor, format_file_in_place, src, fast, mode, write_back, lock
)
): src
for src in sorted(sources)
}
pending: Iterable["asyncio.Future[bool]"] = tasks.keys()
try:
loop.add_signal_handler(signal.SIGINT, cancel, pending)
loop.add_signal_handler(signal.SIGTERM, cancel, pending)
except NotImplementedError:
# There are no good alternatives for these on Windows.
pass
while pending:
done, _ = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
for task in done:
src = tasks.pop(task)
if task.cancelled():
cancelled.append(task)
elif task.exception():
report.failed(src, str(task.exception()))
else:
changed = Changed.YES if task.result() else Changed.NO
# If the file was written back or was successfully checked as
# well-formatted, store this information in the cache.
if write_back is WriteBack.YES or (
write_back is WriteBack.CHECK and changed is Changed.NO
):
sources_to_cache.append(src)
report.done(src, changed)
if cancelled:
await asyncio.gather(*cancelled, loop=loop, return_exceptions=True)
if sources_to_cache:
write_cache(cache, sources_to_cache, mode)
|
async def schedule_formatting(
sources: Set[Path],
fast: bool,
write_back: WriteBack,
mode: Mode,
report: "Report",
loop: asyncio.AbstractEventLoop,
executor: Executor,
) -> None:
"""Run formatting of `sources` in parallel using the provided `executor`.
(Use ProcessPoolExecutors for actual parallelism.)
`write_back`, `fast`, and `mode` options are passed to
:func:`format_file_in_place`.
"""
cache: Cache = {}
if write_back != WriteBack.DIFF:
cache = read_cache(mode)
sources, cached = filter_cached(cache, sources)
for src in sorted(cached):
report.done(src, Changed.CACHED)
if not sources:
return
cancelled = []
sources_to_cache = []
lock = None
if write_back == WriteBack.DIFF:
# For diff output, we need locks to ensure we don't interleave output
# from different processes.
manager = Manager()
lock = manager.Lock()
tasks = {
asyncio.ensure_future(
loop.run_in_executor(
executor, format_file_in_place, src, fast, mode, write_back, lock
)
): src
for src in sorted(sources)
}
pending: Iterable["asyncio.Future[bool]"] = tasks.keys()
try:
loop.add_signal_handler(signal.SIGINT, cancel, pending)
loop.add_signal_handler(signal.SIGTERM, cancel, pending)
except NotImplementedError:
# There are no good alternatives for these on Windows.
pass
while pending:
done, _ = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
for task in done:
src = tasks.pop(task)
if task.cancelled():
cancelled.append(task)
elif task.exception():
report.failed(src, str(task.exception()))
else:
changed = Changed.YES if task.result() else Changed.NO
# If the file was written back or was successfully checked as
# well-formatted, store this information in the cache.
if write_back is WriteBack.YES or (
write_back is WriteBack.CHECK and changed is Changed.NO
):
sources_to_cache.append(src)
report.done(src, changed)
if cancelled:
await asyncio.gather(*cancelled, loop=loop, return_exceptions=True)
if sources_to_cache:
write_cache(cache, sources_to_cache, mode)
|
https://github.com/psf/black/issues/776
|
|Traceback (most recent call last):
| File "/usr/local/lib/XXX/virtualenv/bin/black", line 11, in <module>
| sys.exit(patched_main())
| File "/usr/local/lib/XXX/virtualenv/lib/python3.6/site-packages/black.py", line 3754, in patched_main
| main()
| File "/usr/local/lib/XXX/virtualenv/lib/python3.6/site-packages/click/core.py", line 764, in __call__
| return self.main(*args, **kwargs)
| File "/usr/local/lib/XXX/virtualenv/lib/python3.6/site-packages/click/core.py", line 717, in main
| rv = self.invoke(ctx)
| File "/usr/local/lib/XXX/virtualenv/lib/python3.6/site-packages/click/core.py", line 956, in invoke
| return ctx.invoke(self.callback, **ctx.params)
| File "/usr/local/lib/XXX/virtualenv/lib/python3.6/site-packages/click/core.py", line 555, in invoke
| return callback(*args, **kwargs)
| File "/usr/local/lib/XXX/virtualenv/lib/python3.6/site-packages/click/decorators.py", line 17, in new_func
| return f(get_current_context(), *args, **kwargs)
| File "/usr/local/lib/XXX/virtualenv/lib/python3.6/site-packages/black.py", line 435, in main
| executor = ProcessPoolExecutor(max_workers=os.cpu_count())
| File "/usr/lib/python3.6/concurrent/futures/process.py", line 402, in __init__
| EXTRA_QUEUED_CALLS)
| File "/usr/lib/python3.6/multiprocessing/context.py", line 102, in Queue
| return Queue(maxsize, ctx=self.get_context())
| File "/usr/lib/python3.6/multiprocessing/queues.py", line 42, in __init__
| self._rlock = ctx.Lock()
| File "/usr/lib/python3.6/multiprocessing/context.py", line 67, in Lock
| return Lock(ctx=self.get_context())
| File "/usr/lib/python3.6/multiprocessing/synchronize.py", line 162, in __init__
| SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx)
| File "/usr/lib/python3.6/multiprocessing/synchronize.py", line 59, in __init__
| unlink_now)
|OSError: [Errno 30] Read-only file system
|
OSError
|
def main(
ctx: click.Context,
code: Optional[str],
line_length: int,
target_version: List[TargetVersion],
check: bool,
diff: bool,
fast: bool,
pyi: bool,
py36: bool,
skip_string_normalization: bool,
quiet: bool,
verbose: bool,
include: str,
exclude: str,
src: Tuple[str],
config: Optional[str],
) -> None:
"""The uncompromising code formatter."""
write_back = WriteBack.from_configuration(check=check, diff=diff)
if target_version:
if py36:
err(f"Cannot use both --target-version and --py36")
ctx.exit(2)
else:
versions = set(target_version)
elif py36:
err(
"--py36 is deprecated and will be removed in a future version. "
"Use --target-version py36 instead."
)
versions = PY36_VERSIONS
else:
# We'll autodetect later.
versions = set()
mode = FileMode(
target_versions=versions,
line_length=line_length,
is_pyi=pyi,
string_normalization=not skip_string_normalization,
)
if config and verbose:
out(f"Using configuration from {config}.", bold=False, fg="blue")
if code is not None:
print(format_str(code, mode=mode))
ctx.exit(0)
try:
include_regex = re_compile_maybe_verbose(include)
except re.error:
err(f"Invalid regular expression for include given: {include!r}")
ctx.exit(2)
try:
exclude_regex = re_compile_maybe_verbose(exclude)
except re.error:
err(f"Invalid regular expression for exclude given: {exclude!r}")
ctx.exit(2)
report = Report(check=check, quiet=quiet, verbose=verbose)
root = find_project_root(src)
sources: Set[Path] = set()
for s in src:
p = Path(s)
if p.is_dir():
sources.update(
gen_python_files_in_dir(p, root, include_regex, exclude_regex, report)
)
elif p.is_file() or s == "-":
# if a file was explicitly given, we don't care about its extension
sources.add(p)
else:
err(f"invalid path: {s}")
if len(sources) == 0:
if verbose or not quiet:
out("No paths given. Nothing to do 😴")
ctx.exit(0)
if len(sources) == 1:
reformat_one(
src=sources.pop(),
fast=fast,
write_back=write_back,
mode=mode,
report=report,
)
else:
reformat_many(
sources=sources, fast=fast, write_back=write_back, mode=mode, report=report
)
if verbose or not quiet:
bang = "💥 💔 💥" if report.return_code else "✨ 🍰 ✨"
out(f"All done! {bang}")
click.secho(str(report), err=True)
ctx.exit(report.return_code)
|
def main(
ctx: click.Context,
code: Optional[str],
line_length: int,
target_version: List[TargetVersion],
check: bool,
diff: bool,
fast: bool,
pyi: bool,
py36: bool,
skip_string_normalization: bool,
quiet: bool,
verbose: bool,
include: str,
exclude: str,
src: Tuple[str],
config: Optional[str],
) -> None:
"""The uncompromising code formatter."""
write_back = WriteBack.from_configuration(check=check, diff=diff)
if target_version:
if py36:
err(f"Cannot use both --target-version and --py36")
ctx.exit(2)
else:
versions = set(target_version)
elif py36:
err(
"--py36 is deprecated and will be removed in a future version. "
"Use --target-version py36 instead."
)
versions = PY36_VERSIONS
else:
# We'll autodetect later.
versions = set()
mode = FileMode(
target_versions=versions,
line_length=line_length,
is_pyi=pyi,
string_normalization=not skip_string_normalization,
)
if config and verbose:
out(f"Using configuration from {config}.", bold=False, fg="blue")
if code is not None:
print(format_str(code, mode=mode))
ctx.exit(0)
try:
include_regex = re_compile_maybe_verbose(include)
except re.error:
err(f"Invalid regular expression for include given: {include!r}")
ctx.exit(2)
try:
exclude_regex = re_compile_maybe_verbose(exclude)
except re.error:
err(f"Invalid regular expression for exclude given: {exclude!r}")
ctx.exit(2)
report = Report(check=check, quiet=quiet, verbose=verbose)
root = find_project_root(src)
sources: Set[Path] = set()
for s in src:
p = Path(s)
if p.is_dir():
sources.update(
gen_python_files_in_dir(p, root, include_regex, exclude_regex, report)
)
elif p.is_file() or s == "-":
# if a file was explicitly given, we don't care about its extension
sources.add(p)
else:
err(f"invalid path: {s}")
if len(sources) == 0:
if verbose or not quiet:
out("No paths given. Nothing to do 😴")
ctx.exit(0)
if len(sources) == 1:
reformat_one(
src=sources.pop(),
fast=fast,
write_back=write_back,
mode=mode,
report=report,
)
else:
loop = asyncio.get_event_loop()
executor = ProcessPoolExecutor(max_workers=os.cpu_count())
try:
loop.run_until_complete(
schedule_formatting(
sources=sources,
fast=fast,
write_back=write_back,
mode=mode,
report=report,
loop=loop,
executor=executor,
)
)
finally:
shutdown(loop)
if verbose or not quiet:
bang = "💥 💔 💥" if report.return_code else "✨ 🍰 ✨"
out(f"All done! {bang}")
click.secho(str(report), err=True)
ctx.exit(report.return_code)
|
https://github.com/psf/black/issues/564
|
Exception in thread QueueManagerThread:
Traceback (most recent call last):
File "c:\python37\lib\threading.py", line 917, in _bootstrap_inner
self.run()
File "c:\python37\lib\threading.py", line 865, in run
self._target(*self._args, **self._kwargs)
File "c:\python37\lib\concurrent\futures\process.py", line 354, in _queue_management_worker
ready = wait(readers + worker_sentinels)
File "c:\python37\lib\multiprocessing\connection.py", line 868, in wait
ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout)
File "c:\python37\lib\multiprocessing\connection.py", line 800, in _exhaustive_wait
res = _winapi.WaitForMultipleObjects(L, False, timeout)
ValueError: need at most 63 handles, got a sequence of length 65
|
ValueError
|
def gen_python_files_in_dir(
path: Path,
root: Path,
include: Pattern[str],
exclude: Pattern[str],
report: "Report",
) -> Iterator[Path]:
"""Generate all files under `path` whose paths are not excluded by the
`exclude` regex, but are included by the `include` regex.
Symbolic links pointing outside of the root directory are ignored.
`report` is where output about exclusions goes.
"""
assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}"
for child in path.iterdir():
try:
normalized_path = "/" + child.resolve().relative_to(root).as_posix()
except ValueError:
if child.is_symlink():
report.path_ignored(
child,
"is a symbolic link that points outside of the root directory",
)
continue
raise
if child.is_dir():
normalized_path += "/"
exclude_match = exclude.search(normalized_path)
if exclude_match and exclude_match.group(0):
report.path_ignored(child, f"matches the --exclude regular expression")
continue
if child.is_dir():
yield from gen_python_files_in_dir(child, root, include, exclude, report)
elif child.is_file():
include_match = include.search(normalized_path)
if include_match:
yield child
|
def gen_python_files_in_dir(
path: Path,
root: Path,
include: Pattern[str],
exclude: Pattern[str],
report: "Report",
) -> Iterator[Path]:
"""Generate all files under `path` whose paths are not excluded by the
`exclude` regex, but are included by the `include` regex.
`report` is where output about exclusions goes.
"""
assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}"
for child in path.iterdir():
normalized_path = "/" + child.resolve().relative_to(root).as_posix()
if child.is_dir():
normalized_path += "/"
exclude_match = exclude.search(normalized_path)
if exclude_match and exclude_match.group(0):
report.path_ignored(child, f"matches the --exclude regular expression")
continue
if child.is_dir():
yield from gen_python_files_in_dir(child, root, include, exclude, report)
elif child.is_file():
include_match = include.search(normalized_path)
if include_match:
yield child
|
https://github.com/psf/black/issues/338
|
Traceback (most recent call last):
File "/home/neraste/.virtualenvs/test_black/bin/black", line 11, in <module>
sys.exit(main())
File "/home/neraste/.virtualenvs/test_black/lib/python3.6/site-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/home/neraste/.virtualenvs/test_black/lib/python3.6/site-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/home/neraste/.virtualenvs/test_black/lib/python3.6/site-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/neraste/.virtualenvs/test_black/lib/python3.6/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/home/neraste/.virtualenvs/test_black/lib/python3.6/site-packages/click/decorators.py", line 17, in new_func
return f(get_current_context(), *args, **kwargs)
File "/home/neraste/.virtualenvs/test_black/lib/python3.6/site-packages/black.py", line 347, in main
gen_python_files_in_dir(p, root, include_regex, exclude_regex, report)
File "/home/neraste/.virtualenvs/test_black/lib/python3.6/site-packages/black.py", line 2942, in gen_python_files_in_dir
normalized_path = "/" + child.resolve().relative_to(root).as_posix()
File "/usr/lib64/python3.6/pathlib.py", line 872, in relative_to
.format(str(self), str(formatted)))
ValueError: '/data/test_black/base/resource' does not start with '/data/test_black/base/repo'
|
ValueError
|
def dump_to_file(*output: str) -> str:
"""Dump `output` to a temporary file. Return path to the file."""
import tempfile
with tempfile.NamedTemporaryFile(
mode="w", prefix="blk_", suffix=".log", delete=False, encoding="utf8"
) as f:
for lines in output:
f.write(lines)
if lines and lines[-1] != "\n":
f.write("\n")
return f.name
|
def dump_to_file(*output: str) -> str:
"""Dump `output` to a temporary file. Return path to the file."""
import tempfile
with tempfile.NamedTemporaryFile(
mode="w", prefix="blk_", suffix=".log", delete=False
) as f:
for lines in output:
f.write(lines)
if lines and lines[-1] != "\n":
f.write("\n")
return f.name
|
https://github.com/psf/black/issues/124
|
ERROR: test_expression_diff (tests.test_black.BlackTestCase)
----------------------------------------------------------------------
Traceback (most recent call last):
File "c:\users\zsolz\documents\github\black\tests\test_black.py", line 165, in test_expression_diff
tmp_file = Path(black.dump_to_file(source))
File "c:\users\zsolz\documents\github\black\black.py", line 2161, in dump_to_file
f.write(lines)
File "C:\Users\zsolz\.virtualenvs\black-TlIYXM7K\lib\tempfile.py", line 483, in func_wrapper
return func(*args, **kwargs)
File "C:\Users\zsolz\.virtualenvs\black-TlIYXM7K\lib\encodings\cp1252.py", line 19, in encode
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
UnicodeEncodeError: 'charmap' codec can't encode character '\u0142' in position 4011: character maps to <undefined>
|
UnicodeEncodeError
|
def add_graph(
self,
adjacency_matrix,
node_coords,
node_color="auto",
node_size=50,
edge_cmap=cm.bwr,
edge_vmin=None,
edge_vmax=None,
edge_threshold=None,
edge_kwargs=None,
node_kwargs=None,
colorbar=False,
):
"""Plot undirected graph on each of the axes
Parameters
----------
adjacency_matrix: numpy array of shape (n, n)
represents the edges strengths of the graph. Assumed to be
a symmetric matrix.
node_coords: numpy array_like of shape (n, 3)
3d coordinates of the graph nodes in world space.
node_color: color or sequence of colors
color(s) of the nodes.
node_size: scalar or array_like
size(s) of the nodes in points^2.
edge_cmap: colormap
colormap used for representing the strength of the edges.
edge_vmin: float, optional, default: None
edge_vmax: float, optional, default: None
If not None, either or both of these values will be used to
as the minimum and maximum values to color edges. If None are
supplied the maximum absolute value within the given threshold
will be used as minimum (multiplied by -1) and maximum
coloring levels.
edge_threshold: str or number
If it is a number only the edges with a value greater than
edge_threshold will be shown.
If it is a string it must finish with a percent sign,
e.g. "25.3%", and only the edges with a abs(value) above
the given percentile will be shown.
edge_kwargs: dict
will be passed as kwargs for each edge matlotlib Line2D.
node_kwargs: dict
will be passed as kwargs to the plt.scatter call that plots all
the nodes in one go.
"""
# set defaults
if edge_kwargs is None:
edge_kwargs = {}
if node_kwargs is None:
node_kwargs = {}
if isinstance(node_color, str) and node_color == "auto":
nb_nodes = len(node_coords)
node_color = mpl_cm.Set2(np.linspace(0, 1, nb_nodes))
node_coords = np.asarray(node_coords)
# decompress input matrix if sparse
if sparse.issparse(adjacency_matrix):
adjacency_matrix = adjacency_matrix.toarray()
# make the lines below well-behaved
adjacency_matrix = np.nan_to_num(adjacency_matrix)
# safety checks
if "s" in node_kwargs:
raise ValueError(
"Please use 'node_size' and not 'node_kwargs' to specify node sizes"
)
if "c" in node_kwargs:
raise ValueError(
"Please use 'node_color' and not 'node_kwargs' to specify node colors"
)
adjacency_matrix_shape = adjacency_matrix.shape
if (
len(adjacency_matrix_shape) != 2
or adjacency_matrix_shape[0] != adjacency_matrix_shape[1]
):
raise ValueError(
"'adjacency_matrix' is supposed to have shape (n, n)."
" Its shape was {0}".format(adjacency_matrix_shape)
)
node_coords_shape = node_coords.shape
if len(node_coords_shape) != 2 or node_coords_shape[1] != 3:
message = (
"Invalid shape for 'node_coords'. You passed an "
"'adjacency_matrix' of shape {0} therefore "
"'node_coords' should be a array with shape ({0[0]}, 3) "
"while its shape was {1}"
).format(adjacency_matrix_shape, node_coords_shape)
raise ValueError(message)
if isinstance(node_color, (list, np.ndarray)) and len(node_color) != 1:
if len(node_color) != node_coords_shape[0]:
raise ValueError(
"Mismatch between the number of nodes ({0}) "
"and and the number of node colors ({1}).".format(
node_coords_shape[0], len(node_color)
)
)
if node_coords_shape[0] != adjacency_matrix_shape[0]:
raise ValueError(
"Shape mismatch between 'adjacency_matrix' "
"and 'node_coords'"
"'adjacency_matrix' shape is {0}, 'node_coords' shape is {1}".format(
adjacency_matrix_shape, node_coords_shape
)
)
if not np.allclose(adjacency_matrix, adjacency_matrix.T, rtol=1e-3):
raise ValueError("'adjacency_matrix' should be symmetric")
# For a masked array, masked values are replaced with zeros
if hasattr(adjacency_matrix, "mask"):
if not (adjacency_matrix.mask == adjacency_matrix.mask.T).all():
raise ValueError("'adjacency_matrix' was masked with a non symmetric mask")
adjacency_matrix = adjacency_matrix.filled(0)
if edge_threshold is not None:
# Keep a percentile of edges with the highest absolute
# values, so only need to look at the covariance
# coefficients below the diagonal
lower_diagonal_indices = np.tril_indices_from(adjacency_matrix, k=-1)
lower_diagonal_values = adjacency_matrix[lower_diagonal_indices]
edge_threshold = _utils.param_validation.check_threshold(
edge_threshold,
np.abs(lower_diagonal_values),
stats.scoreatpercentile,
"edge_threshold",
)
adjacency_matrix = adjacency_matrix.copy()
threshold_mask = np.abs(adjacency_matrix) < edge_threshold
adjacency_matrix[threshold_mask] = 0
lower_triangular_adjacency_matrix = np.tril(adjacency_matrix, k=-1)
non_zero_indices = lower_triangular_adjacency_matrix.nonzero()
line_coords = [node_coords[list(index)] for index in zip(*non_zero_indices)]
adjacency_matrix_values = adjacency_matrix[non_zero_indices]
for ax in self.axes.values():
ax._add_markers(node_coords, node_color, node_size, **node_kwargs)
if line_coords:
ax._add_lines(
line_coords,
adjacency_matrix_values,
edge_cmap,
vmin=edge_vmin,
vmax=edge_vmax,
**edge_kwargs,
)
# To obtain the brain left view, we simply invert the x axis
if ax.direction == "l" and not (ax.ax.get_xlim()[0] > ax.ax.get_xlim()[1]):
ax.ax.invert_xaxis()
if colorbar:
self._colorbar = colorbar
self._show_colorbar(ax.cmap, ax.norm, threshold=edge_threshold)
plt.draw_if_interactive()
|
def add_graph(
self,
adjacency_matrix,
node_coords,
node_color="auto",
node_size=50,
edge_cmap=cm.bwr,
edge_vmin=None,
edge_vmax=None,
edge_threshold=None,
edge_kwargs=None,
node_kwargs=None,
colorbar=False,
):
"""Plot undirected graph on each of the axes
Parameters
----------
adjacency_matrix: numpy array of shape (n, n)
represents the edges strengths of the graph. Assumed to be
a symmetric matrix.
node_coords: numpy array_like of shape (n, 3)
3d coordinates of the graph nodes in world space.
node_color: color or sequence of colors
color(s) of the nodes.
node_size: scalar or array_like
size(s) of the nodes in points^2.
edge_cmap: colormap
colormap used for representing the strength of the edges.
edge_vmin: float, optional, default: None
edge_vmax: float, optional, default: None
If not None, either or both of these values will be used to
as the minimum and maximum values to color edges. If None are
supplied the maximum absolute value within the given threshold
will be used as minimum (multiplied by -1) and maximum
coloring levels.
edge_threshold: str or number
If it is a number only the edges with a value greater than
edge_threshold will be shown.
If it is a string it must finish with a percent sign,
e.g. "25.3%", and only the edges with a abs(value) above
the given percentile will be shown.
edge_kwargs: dict
will be passed as kwargs for each edge matlotlib Line2D.
node_kwargs: dict
will be passed as kwargs to the plt.scatter call that plots all
the nodes in one go.
"""
# set defaults
if edge_kwargs is None:
edge_kwargs = {}
if node_kwargs is None:
node_kwargs = {}
if node_color == "auto":
nb_nodes = len(node_coords)
node_color = mpl_cm.Set2(np.linspace(0, 1, nb_nodes))
node_coords = np.asarray(node_coords)
# decompress input matrix if sparse
if sparse.issparse(adjacency_matrix):
adjacency_matrix = adjacency_matrix.toarray()
# make the lines below well-behaved
adjacency_matrix = np.nan_to_num(adjacency_matrix)
# safety checks
if "s" in node_kwargs:
raise ValueError(
"Please use 'node_size' and not 'node_kwargs' to specify node sizes"
)
if "c" in node_kwargs:
raise ValueError(
"Please use 'node_color' and not 'node_kwargs' to specify node colors"
)
adjacency_matrix_shape = adjacency_matrix.shape
if (
len(adjacency_matrix_shape) != 2
or adjacency_matrix_shape[0] != adjacency_matrix_shape[1]
):
raise ValueError(
"'adjacency_matrix' is supposed to have shape (n, n)."
" Its shape was {0}".format(adjacency_matrix_shape)
)
node_coords_shape = node_coords.shape
if len(node_coords_shape) != 2 or node_coords_shape[1] != 3:
message = (
"Invalid shape for 'node_coords'. You passed an "
"'adjacency_matrix' of shape {0} therefore "
"'node_coords' should be a array with shape ({0[0]}, 3) "
"while its shape was {1}"
).format(adjacency_matrix_shape, node_coords_shape)
raise ValueError(message)
if node_coords_shape[0] != adjacency_matrix_shape[0]:
raise ValueError(
"Shape mismatch between 'adjacency_matrix' "
"and 'node_coords'"
"'adjacency_matrix' shape is {0}, 'node_coords' shape is {1}".format(
adjacency_matrix_shape, node_coords_shape
)
)
if not np.allclose(adjacency_matrix, adjacency_matrix.T, rtol=1e-3):
raise ValueError("'adjacency_matrix' should be symmetric")
# For a masked array, masked values are replaced with zeros
if hasattr(adjacency_matrix, "mask"):
if not (adjacency_matrix.mask == adjacency_matrix.mask.T).all():
raise ValueError("'adjacency_matrix' was masked with a non symmetric mask")
adjacency_matrix = adjacency_matrix.filled(0)
if edge_threshold is not None:
# Keep a percentile of edges with the highest absolute
# values, so only need to look at the covariance
# coefficients below the diagonal
lower_diagonal_indices = np.tril_indices_from(adjacency_matrix, k=-1)
lower_diagonal_values = adjacency_matrix[lower_diagonal_indices]
edge_threshold = _utils.param_validation.check_threshold(
edge_threshold,
np.abs(lower_diagonal_values),
stats.scoreatpercentile,
"edge_threshold",
)
adjacency_matrix = adjacency_matrix.copy()
threshold_mask = np.abs(adjacency_matrix) < edge_threshold
adjacency_matrix[threshold_mask] = 0
lower_triangular_adjacency_matrix = np.tril(adjacency_matrix, k=-1)
non_zero_indices = lower_triangular_adjacency_matrix.nonzero()
line_coords = [node_coords[list(index)] for index in zip(*non_zero_indices)]
adjacency_matrix_values = adjacency_matrix[non_zero_indices]
for ax in self.axes.values():
ax._add_markers(node_coords, node_color, node_size, **node_kwargs)
if line_coords:
ax._add_lines(
line_coords,
adjacency_matrix_values,
edge_cmap,
vmin=edge_vmin,
vmax=edge_vmax,
**edge_kwargs,
)
# To obtain the brain left view, we simply invert the x axis
if ax.direction == "l" and not (ax.ax.get_xlim()[0] > ax.ax.get_xlim()[1]):
ax.ax.invert_xaxis()
if colorbar:
self._colorbar = colorbar
self._show_colorbar(ax.cmap, ax.norm, threshold=edge_threshold)
plt.draw_if_interactive()
|
https://github.com/nilearn/nilearn/issues/2303
|
ValueError Traceback (most recent call last)
<ipython-input-23-1935b6c630f1> in <module>()
----> 1 plotting.plot_connectome(connetome, coords, node_color=np.array(['red','seagreen','blue']))
/home/xiakon/.conda/envs/bigpype/lib/python2.7/site-packages/nilearn/plotting/img_plotting.pyc in plot_connectome(adjacency_matrix, node_coords, node_color, node_size, edge_cmap, edge_vmin, edge_vmax, edge_threshold, output_file, display_mode, figure, axes, title, annotate, black_bg, alpha, edge_kwargs, node_kwargs, colorbar)
1283 edge_threshold=edge_threshold,
1284 edge_kwargs=edge_kwargs, node_kwargs=node_kwargs,
-> 1285 colorbar=colorbar)
1286
1287 if output_file is not None:
/home/xiakon/.conda/envs/bigpype/lib/python2.7/site-packages/nilearn/plotting/displays.pyc in add_graph(self, adjacency_matrix, node_coords, node_color, node_size, edge_cmap, edge_vmin, edge_vmax, edge_threshold, edge_kwargs, node_kwargs, colorbar)
1750 if node_kwargs is None:
1751 node_kwargs = {}
-> 1752 if node_color == 'auto':
1753 nb_nodes = len(node_coords)
1754 node_color = mpl_cm.Set2(np.linspace(0, 1, nb_nodes))
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
|
ValueError
|
def view_stat_map(stat_map_img, threshold=None, bg_img=None, vmax=None):
"""
Insert a surface plot of a surface map into an HTML page.
Parameters
----------
stat_map_img : Niimg-like object
See http://nilearn.github.io/manipulating_images/input_output.html
The statistical map image. Should be 3D or
4D with exactly one time point (i.e. stat_map_img.shape[-1] = 1)
threshold : str, number or None, optional (default=None)
If None, no thresholding.
If it is a number only values of amplitude greater
than threshold will be shown.
If it is a string it must finish with a percent sign,
e.g. "25.3%", and only values of amplitude above the
given percentile will be shown.
bg_img : Niimg-like object, optional (default=None)
See http://nilearn.github.io/manipulating_images/input_output.html
The background image that the stat map will be plotted on top of.
If nothing is specified, the MNI152 template will be used.
vmax : float, optional (default=None)
Upper bound for plotting
Returns
-------
StatMapView : plot of the stat map.
It can be saved as an html page or rendered (transparently) by the
Jupyter notebook.
"""
stat_map_img = check_niimg_3d(stat_map_img, dtype="auto")
if bg_img is None:
bg_img = datasets.load_mni152_template()
bg_mask = datasets.load_mni152_brain_mask()
else:
bg_img = image.load_img(bg_img)
bg_mask = image.new_img_like(bg_img, bg_img.get_data() != 0)
stat_map_img = image.resample_to_img(stat_map_img, bg_img)
stat_map_img = image.new_img_like(
stat_map_img, stat_map_img.get_data() * bg_mask.get_data()
)
if threshold is None:
abs_threshold = "null"
else:
abs_threshold = check_threshold(
threshold, stat_map_img.get_data(), fast_abs_percentile
)
abs_threshold = str(abs_threshold)
if vmax is None:
vmax = np.abs(stat_map_img.get_data()).max()
html = get_html_template("stat_map_template.html")
html = html.replace("INSERT_STAT_MAP_DATA_HERE", _encode_nii(stat_map_img))
html = html.replace("INSERT_MNI_DATA_HERE", _encode_nii(bg_img))
html = html.replace("INSERT_ABS_MIN_HERE", abs_threshold)
html = html.replace("INSERT_ABS_MAX_HERE", str(vmax))
return StatMapView(html)
|
def view_stat_map(stat_map_img, threshold=None, bg_img=None, vmax=None):
"""
Insert a surface plot of a surface map into an HTML page.
Parameters
----------
stat_map_img : Niimg-like object
See http://nilearn.github.io/manipulating_images/input_output.html
The statistical map image
threshold : str, number or None, optional (default=None)
If None, no thresholding.
If it is a number only values of amplitude greater
than threshold will be shown.
If it is a string it must finish with a percent sign,
e.g. "25.3%", and only values of amplitude above the
given percentile will be shown.
bg_img : Niimg-like object, optional (default=None)
See http://nilearn.github.io/manipulating_images/input_output.html
The background image that the stat map will be plotted on top of.
If nothing is specified, the MNI152 template will be used.
vmax : float, optional (default=None)
Upper bound for plotting
Returns
-------
StatMapView : plot of the stat map.
It can be saved as an html page or rendered (transparently) by the
Jupyter notebook.
"""
if bg_img is None:
bg_img = datasets.load_mni152_template()
bg_mask = datasets.load_mni152_brain_mask()
else:
bg_img = image.load_img(bg_img)
bg_mask = image.new_img_like(bg_img, bg_img.get_data() != 0)
stat_map_img = image.resample_to_img(stat_map_img, bg_img)
stat_map_img = image.new_img_like(
stat_map_img, stat_map_img.get_data() * bg_mask.get_data()
)
if threshold is None:
abs_threshold = "null"
else:
abs_threshold = check_threshold(
threshold, stat_map_img.get_data(), fast_abs_percentile
)
abs_threshold = str(abs_threshold)
if vmax is None:
vmax = np.abs(stat_map_img.get_data()).max()
html = get_html_template("stat_map_template.html")
html = html.replace("INSERT_STAT_MAP_DATA_HERE", _encode_nii(stat_map_img))
html = html.replace("INSERT_MNI_DATA_HERE", _encode_nii(bg_img))
html = html.replace("INSERT_ABS_MIN_HERE", abs_threshold)
html = html.replace("INSERT_ABS_MAX_HERE", str(vmax))
return StatMapView(html)
|
https://github.com/nilearn/nilearn/issues/1725
|
plotting.view_stat_map(decoder.coef_img_, bg_img=haxby_dataset.anat[0])
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-30-dde0b99b81f1> in <module>()
----> 1 plotting.view_stat_map(decoder.coef_img_, bg_img=haxby_dataset.anat[0])
/home/varoquau/dev/nilearn/nilearn/plotting/html_stat_map.py in view_stat_map(stat_map_img, threshold, bg_img, vmax)
90 stat_map_img = image.resample_to_img(stat_map_img, bg_img)
91 stat_map_img = image.new_img_like(
---> 92 stat_map_img, stat_map_img.get_data() * bg_mask.get_data())
93 if threshold is None:
94 abs_threshold = 'null'
ValueError: operands could not be broadcast together with shapes (124,256,256,1) (124,256,256)
|
ValueError
|
def add_markers(self, marker_coords, marker_color="r", marker_size=30, **kwargs):
"""Add markers to the plot.
Parameters
----------
marker_coords: array of size (n_markers, 3)
Coordinates of the markers to plot. For each slice, only markers
that are 2 millimeters away from the slice are plotted.
marker_color: pyplot compatible color or list of shape (n_markers,)
List of colors for each marker that can be string or matplotlib
colors
marker_size: single float or list of shape (n_markers,)
Size in pixel for each marker
"""
defaults = {"marker": "o", "zorder": 1000}
marker_coords = np.asanyarray(marker_coords)
for k, v in defaults.items():
kwargs.setdefault(k, v)
for display_ax in self.axes.values():
direction = display_ax.direction
coord = display_ax.coord
marker_coords_2d, third_d = _coords_3d_to_2d(
marker_coords, direction, return_direction=True
)
xdata, ydata = marker_coords_2d.T
# Check if coord has integer represents a cut in direction
# to follow the heuristic. If no foreground image is given
# coordinate is empty or None. This case is valid for plotting
# markers on glass brain without any foreground image.
if isinstance(coord, numbers.Number):
# Heuristic that plots only markers that are 2mm away
# from the current slice.
# XXX: should we keep this heuristic?
mask = np.abs(third_d - coord) <= 2.0
xdata = xdata[mask]
ydata = ydata[mask]
display_ax.ax.scatter(xdata, ydata, s=marker_size, c=marker_color, **kwargs)
|
def add_markers(self, marker_coords, marker_color="r", marker_size=30, **kwargs):
"""Add markers to the plot.
Parameters
----------
marker_coords: array of size (n_markers, 3)
Coordinates of the markers to plot. For each slice, only markers
that are 2 millimeters away from the slice are plotted.
marker_color: pyplot compatible color or list of shape (n_markers,)
List of colors for each marker that can be string or matplotlib
colors
marker_size: single float or list of shape (n_markers,)
Size in pixel for each marker
"""
defaults = {"marker": "o", "zorder": 1000}
marker_coords = np.asanyarray(marker_coords)
for k, v in defaults.items():
kwargs.setdefault(k, v)
for display_ax in self.axes.values():
direction = display_ax.direction
coord = display_ax.coord
marker_coords_2d, third_d = _coords_3d_to_2d(
marker_coords, direction, return_direction=True
)
# Heuristic that plots only markers that are 2mm away from the
# current slice.
# XXX: should we keep this heuristic?
mask = np.abs(third_d - coord) <= 2.0
xdata, ydata = marker_coords_2d.T
display_ax.ax.scatter(
xdata[mask], ydata[mask], s=marker_size, c=marker_color, **kwargs
)
|
https://github.com/nilearn/nilearn/issues/1595
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-10-8bfe69e50628> in <module>()
4 # and third argument `marker_size` denotes size of the sphere
5 coords = [(-34, -39, -9)]
----> 6 fig.add_markers(coords)
C:\Anaconda2\lib\site-packages\nilearn\plotting\displays.pyc in add_markers(self, marker_coords, marker_color, marker_size, **kwargs)
833 # current slice.
834 # XXX: should we keep this heuristic?
--> 835 mask = np.abs(third_d - coord) <= 2.
836 xdata, ydata = marker_coords_2d.T
837 display_ax.ax.scatter(xdata[mask], ydata[mask],
TypeError: unsupported operand type(s) for -: 'int' and 'NoneType'
|
TypeError
|
def fast_abs_percentile(data, percentile=80):
"""A fast version of the percentile of the absolute value.
Parameters
==========
data: ndarray, possibly masked array
The input data
percentile: number between 0 and 100
The percentile that we are asking for
Returns
=======
value: number
The score at percentile
Notes
=====
This is a faster, and less accurate version of
scipy.stats.scoreatpercentile(np.abs(data), percentile)
"""
if hasattr(data, "mask"):
# Catter for masked arrays
data = np.asarray(data[np.logical_not(data.mask)])
data = np.abs(data)
data = data.ravel()
index = int(data.size * 0.01 * percentile)
if partition is not None:
# Partial sort: faster than sort
data = partition(data, index)
else:
data.sort()
return data[index]
|
def fast_abs_percentile(data, percentile=80):
"""A fast version of the percentile of the absolute value.
Parameters
==========
data: ndarray, possibly masked array
The input data
percentile: number between 0 and 100
The percentile that we are asking for
Returns
=======
value: number
The score at percentile
Notes
=====
This is a faster, and less accurate version of
scipy.stats.scoreatpercentile(np.abs(data), percentile)
"""
if hasattr(data, "mask"):
# Catter for masked arrays
data = np.asarray(data[np.logical_not(data.mask)])
data = np.abs(data)
data = data.ravel()
index = int(data.size * 0.01 * percentile)
if partition is not None:
# Partial sort: faster than sort
return partition(data, index)[index + 1]
data.sort()
return data[index + 1]
|
https://github.com/nilearn/nilearn/issues/872
|
IndexError Traceback (most recent call last)
<ipython-input-29-094589b8fe4e> in <module>()
----> 1 fast_abs_percentile(arange(4))
/home/elvis/CODE/FORKED/nilearn/nilearn/_utils/extmath.pyc in fast_abs_percentile(data, percentile)
43 if partition is not None:
44 # Partial sort: faster than sort
---> 45 return partition(data, index)[index + 1]
46 data.sort()
47 return data[index + 1]
IndexError: index out of bounds
/home/elvis/CODE/FORKED/nilearn/nilearn/_utils/extmath.py(45)fast_abs_percentile()
44 # Partial sort: faster than sort
---> 45 return partition(data, index)[index + 1]
46 data.sort()
|
IndexError
|
def _log_started_message(self, listeners: List[socket.SocketType]) -> None:
config = self.config
if config.fd is not None:
sock = listeners[0]
logger.info(
"Uvicorn running on socket %s (Press CTRL+C to quit)",
sock.getsockname(),
)
elif config.uds is not None:
logger.info(
"Uvicorn running on unix socket %s (Press CTRL+C to quit)", config.uds
)
else:
addr_format = "%s://%s:%d"
host = "0.0.0.0" if config.host is None else config.host
if ":" in host:
# It's an IPv6 address.
addr_format = "%s://[%s]:%d"
port = config.port
if port == 0:
port = listeners[0].getsockname()[1]
protocol_name = "https" if config.ssl else "http"
message = f"Uvicorn running on {addr_format} (Press CTRL+C to quit)"
color_message = (
"Uvicorn running on "
+ click.style(addr_format, bold=True)
+ " (Press CTRL+C to quit)"
)
logger.info(
message,
protocol_name,
host,
port,
extra={"color_message": color_message},
)
|
def _log_started_message(self, listeners: List[socket.SocketType]) -> None:
config = self.config
if config.fd is not None:
sock = listeners[0]
logger.info(
"Uvicorn running on socket %s (Press CTRL+C to quit)",
sock.getsockname(),
)
elif config.uds is not None:
logger.info(
"Uvicorn running on unix socket %s (Press CTRL+C to quit)", config.uds
)
else:
addr_format = "%s://%s:%d"
host = "0.0.0.0" if config.host is None else config.host
if ":" in host:
# It's an IPv6 address.
addr_format = "%s://[%s]:%d"
port = config.port
if port == 0:
port = listeners[0].getpeername()[1]
protocol_name = "https" if config.ssl else "http"
message = f"Uvicorn running on {addr_format} (Press CTRL+C to quit)"
color_message = (
"Uvicorn running on "
+ click.style(addr_format, bold=True)
+ " (Press CTRL+C to quit)"
)
logger.info(
message,
protocol_name,
host,
port,
extra={"color_message": color_message},
)
|
https://github.com/encode/uvicorn/issues/974
|
[12:03:58] D:\sand\uvicorn-port-error
λ venv\win32-3.8\Scripts\uvicorn example:app --port=0
INFO: Started server process [31268]
INFO: Waiting for application startup.
INFO: ASGI 'lifespan' protocol appears unsupported.
INFO: Application startup complete.
Traceback (most recent call last):
File "C:\Program Files\Python38\lib\runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "C:\Program Files\Python38\lib\runpy.py", line 87, in _run_code
exec(code, run_globals)
File "D:\sand\uvicorn-port-error\venv\win32-3.8\Scripts\uvicorn.exe\__main__.py", line 7, in <module>
File "d:\sand\uvicorn-port-error\venv\win32-3.8\lib\site-packages\click\core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "d:\sand\uvicorn-port-error\venv\win32-3.8\lib\site-packages\click\core.py", line 782, in main
rv = self.invoke(ctx)
File "d:\sand\uvicorn-port-error\venv\win32-3.8\lib\site-packages\click\core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "d:\sand\uvicorn-port-error\venv\win32-3.8\lib\site-packages\click\core.py", line 610, in invoke
return callback(*args, **kwargs)
File "d:\sand\uvicorn-port-error\venv\win32-3.8\lib\site-packages\uvicorn\main.py", line 362, in main
run(**kwargs)
File "d:\sand\uvicorn-port-error\venv\win32-3.8\lib\site-packages\uvicorn\main.py", line 386, in run
server.run()
File "d:\sand\uvicorn-port-error\venv\win32-3.8\lib\site-packages\uvicorn\server.py", line 49, in run
loop.run_until_complete(self.serve(sockets=sockets))
File "C:\Program Files\Python38\lib\asyncio\base_events.py", line 616, in run_until_complete
return future.result()
File "d:\sand\uvicorn-port-error\venv\win32-3.8\lib\site-packages\uvicorn\server.py", line 66, in serve
await self.startup(sockets=sockets)
File "d:\sand\uvicorn-port-error\venv\win32-3.8\lib\site-packages\uvicorn\server.py", line 158, in startup
self._log_started_message(listeners)
File "d:\sand\uvicorn-port-error\venv\win32-3.8\lib\site-packages\uvicorn\server.py", line 190, in _log_started_message
port = listeners[0].getpeername()[1]
File "C:\Program Files\Python38\lib\asyncio\trsock.py", line 85, in getpeername
return self._sock.getpeername()
OSError: [WinError 10057] A request to send or receive data was disallowed because the socket is not connected and (when sending on a datagram socket using a sendto call) no address was supplied
|
OSError
|
async def asgi_send(self, message):
message_type = message["type"]
if not self.handshake_started_event.is_set():
if message_type == "websocket.accept":
self.logger.info(
'%s - "WebSocket %s" [accepted]',
self.scope["client"],
self.scope["root_path"] + self.scope["path"],
)
self.initial_response = None
self.accepted_subprotocol = message.get("subprotocol")
self.handshake_started_event.set()
elif message_type == "websocket.close":
self.logger.info(
'%s - "WebSocket %s" 403',
self.scope["client"],
self.scope["root_path"] + self.scope["path"],
)
self.initial_response = (http.HTTPStatus.FORBIDDEN, [], b"")
self.handshake_started_event.set()
self.closed_event.set()
else:
msg = (
"Expected ASGI message 'websocket.accept' or 'websocket.close', "
"but got '%s'."
)
raise RuntimeError(msg % message_type)
elif not self.closed_event.is_set():
await self.handshake_completed_event.wait()
if message_type == "websocket.send":
bytes_data = message.get("bytes")
text_data = message.get("text")
data = text_data if bytes_data is None else bytes_data
await self.send(data)
elif message_type == "websocket.close":
code = message.get("code", 1000)
self.close_code = code # for WebSocketServerProtocol
await self.close(code)
self.closed_event.set()
else:
msg = (
"Expected ASGI message 'websocket.send' or 'websocket.close',"
" but got '%s'."
)
raise RuntimeError(msg % message_type)
else:
msg = "Unexpected ASGI message '%s', after sending 'websocket.close'."
raise RuntimeError(msg % message_type)
|
async def asgi_send(self, message):
message_type = message["type"]
if not self.handshake_started_event.is_set():
if message_type == "websocket.accept":
self.logger.info(
'%s - "WebSocket %s" [accepted]',
self.scope["client"],
self.scope["root_path"] + self.scope["path"],
)
self.initial_response = None
self.accepted_subprotocol = message.get("subprotocol")
self.handshake_started_event.set()
elif message_type == "websocket.close":
self.logger.info(
'%s - "WebSocket %s" 403',
self.scope["client"],
self.scope["root_path"] + self.scope["path"],
)
self.initial_response = (http.HTTPStatus.FORBIDDEN, [], b"")
self.handshake_started_event.set()
self.closed_event.set()
else:
msg = (
"Expected ASGI message 'websocket.accept' or 'websocket.close', "
"but got '%s'."
)
raise RuntimeError(msg % message_type)
elif not self.closed_event.is_set():
await self.handshake_completed_event.wait()
if message_type == "websocket.send":
bytes_data = message.get("bytes")
text_data = message.get("text")
data = text_data if bytes_data is None else bytes_data
await self.send(data)
elif message_type == "websocket.close":
code = message.get("code", 1000)
await self.close(code)
self.closed_event.set()
else:
msg = (
"Expected ASGI message 'websocket.send' or 'websocket.close',"
" but got '%s'."
)
raise RuntimeError(msg % message_type)
else:
msg = "Unexpected ASGI message '%s', after sending 'websocket.close'."
raise RuntimeError(msg % message_type)
|
https://github.com/encode/uvicorn/issues/244
|
Traceback (most recent call last):
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/uvicorn/protocols/websockets/websockets_impl.py", line 140, in run_asgi
result = await asgi(self.asgi_receive, self.asgi_send)
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/sessions.py", line 179, in __call__
return await self.inner(receive, self.send)
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/middleware.py", line 41, in coroutine_call
await inner_instance(receive, send)
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/consumer.py", line 59, in __call__
[receive, self.channel_receive], self.dispatch
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/utils.py", line 59, in await_many_dispatch
await task
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/utils.py", line 51, in await_many_dispatch
result = task.result()
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/uvicorn/protocols/websockets/websockets_impl.py", line 222, in asgi_receive
data = await self.recv()
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/websockets/protocol.py", line 419, in recv
return_when=asyncio.FIRST_COMPLETED,
File "/usr/lib/python3.7/asyncio/tasks.py", line 361, in wait
fs = {ensure_future(f, loop=loop) for f in set(fs)}
File "/usr/lib/python3.7/asyncio/tasks.py", line 361, in <setcomp>
fs = {ensure_future(f, loop=loop) for f in set(fs)}
File "/usr/lib/python3.7/asyncio/tasks.py", line 592, in ensure_future
raise TypeError('An asyncio.Future, a coroutine or an awaitable is '
TypeError: An asyncio.Future, a coroutine or an awaitable is required
|
TypeError
|
async def asgi_receive(self):
if not self.connect_sent:
self.connect_sent = True
return {"type": "websocket.connect"}
await self.handshake_completed_event.wait()
if self.closed_event.is_set():
# If the client disconnected: WebSocketServerProtocol set self.close_code.
# If the handshake failed or the app closed before handshake completion,
# use 1006 Abnormal Closure.
code = getattr(self, "close_code", 1006)
return {"type": "websocket.disconnect", "code": code}
try:
data = await self.recv()
except websockets.ConnectionClosed as exc:
return {"type": "websocket.disconnect", "code": exc.code}
msg = {"type": "websocket.receive"}
if isinstance(data, str):
msg["text"] = data
else:
msg["bytes"] = data
return msg
|
async def asgi_receive(self):
if not self.connect_sent:
self.connect_sent = True
return {"type": "websocket.connect"}
await self.handshake_completed_event.wait()
try:
data = await self.recv()
except websockets.ConnectionClosed as exc:
return {"type": "websocket.disconnect", "code": exc.code}
msg = {"type": "websocket.receive"}
if isinstance(data, str):
msg["text"] = data
else:
msg["bytes"] = data
return msg
|
https://github.com/encode/uvicorn/issues/244
|
Traceback (most recent call last):
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/uvicorn/protocols/websockets/websockets_impl.py", line 140, in run_asgi
result = await asgi(self.asgi_receive, self.asgi_send)
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/sessions.py", line 179, in __call__
return await self.inner(receive, self.send)
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/middleware.py", line 41, in coroutine_call
await inner_instance(receive, send)
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/consumer.py", line 59, in __call__
[receive, self.channel_receive], self.dispatch
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/utils.py", line 59, in await_many_dispatch
await task
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/utils.py", line 51, in await_many_dispatch
result = task.result()
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/uvicorn/protocols/websockets/websockets_impl.py", line 222, in asgi_receive
data = await self.recv()
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/websockets/protocol.py", line 419, in recv
return_when=asyncio.FIRST_COMPLETED,
File "/usr/lib/python3.7/asyncio/tasks.py", line 361, in wait
fs = {ensure_future(f, loop=loop) for f in set(fs)}
File "/usr/lib/python3.7/asyncio/tasks.py", line 361, in <setcomp>
fs = {ensure_future(f, loop=loop) for f in set(fs)}
File "/usr/lib/python3.7/asyncio/tasks.py", line 592, in ensure_future
raise TypeError('An asyncio.Future, a coroutine or an awaitable is '
TypeError: An asyncio.Future, a coroutine or an awaitable is required
|
TypeError
|
async def send(self, message):
await self.writable.wait()
message_type = message["type"]
if not self.handshake_complete:
if message_type == "websocket.accept":
self.logger.info(
'%s - "WebSocket %s" [accepted]',
self.scope["client"],
self.scope["root_path"] + self.scope["path"],
)
self.handshake_complete = True
subprotocol = message.get("subprotocol")
output = self.conn.send(
wsproto.events.AcceptConnection(
subprotocol=subprotocol, extensions=[PerMessageDeflate()]
)
)
self.transport.write(output)
elif message_type == "websocket.close":
self.queue.put_nowait({"type": "websocket.disconnect", "code": None})
self.logger.info(
'%s - "WebSocket %s" 403',
self.scope["client"],
self.scope["root_path"] + self.scope["path"],
)
self.handshake_complete = True
self.close_sent = True
msg = events.RejectConnection(status_code=403, headers=[])
output = self.conn.send(msg)
self.transport.write(output)
self.transport.close()
else:
msg = (
"Expected ASGI message 'websocket.accept' or 'websocket.close', "
"but got '%s'."
)
raise RuntimeError(msg % message_type)
elif not self.close_sent:
if message_type == "websocket.send":
bytes_data = message.get("bytes")
text_data = message.get("text")
data = text_data if bytes_data is None else bytes_data
output = self.conn.send(wsproto.events.Message(data=data))
if not self.transport.is_closing():
self.transport.write(output)
elif message_type == "websocket.close":
self.close_sent = True
code = message.get("code", 1000)
self.queue.put_nowait({"type": "websocket.disconnect", "code": code})
output = self.conn.send(wsproto.events.CloseConnection(code=code))
if not self.transport.is_closing():
self.transport.write(output)
self.transport.close()
else:
msg = (
"Expected ASGI message 'websocket.send' or 'websocket.close',"
" but got '%s'."
)
raise RuntimeError(msg % message_type)
else:
msg = "Unexpected ASGI message '%s', after sending 'websocket.close'."
raise RuntimeError(msg % message_type)
|
async def send(self, message):
await self.writable.wait()
message_type = message["type"]
if not self.handshake_complete:
if message_type == "websocket.accept":
self.logger.info(
'%s - "WebSocket %s" [accepted]',
self.scope["client"],
self.scope["root_path"] + self.scope["path"],
)
self.handshake_complete = True
subprotocol = message.get("subprotocol")
output = self.conn.send(
wsproto.events.AcceptConnection(
subprotocol=subprotocol, extensions=[PerMessageDeflate()]
)
)
self.transport.write(output)
elif message_type == "websocket.close":
self.queue.put_nowait({"type": "websocket.disconnect", "code": None})
self.logger.info(
'%s - "WebSocket %s" 403',
self.scope["client"],
self.scope["root_path"] + self.scope["path"],
)
self.handshake_complete = True
self.close_sent = True
msg = h11.Response(status_code=403, headers=[])
output = self.conn.send(msg)
msg = h11.EndOfMessage()
output += self.conn.send(msg)
self.transport.write(output)
self.transport.close()
else:
msg = (
"Expected ASGI message 'websocket.accept' or 'websocket.close', "
"but got '%s'."
)
raise RuntimeError(msg % message_type)
elif not self.close_sent:
if message_type == "websocket.send":
bytes_data = message.get("bytes")
text_data = message.get("text")
data = text_data if bytes_data is None else bytes_data
output = self.conn.send(wsproto.events.Message(data=data))
if not self.transport.is_closing():
self.transport.write(output)
elif message_type == "websocket.close":
self.close_sent = True
code = message.get("code", 1000)
self.queue.put_nowait({"type": "websocket.disconnect", "code": code})
output = self.conn.send(wsproto.events.CloseConnection(code=code))
if not self.transport.is_closing():
self.transport.write(output)
self.transport.close()
else:
msg = (
"Expected ASGI message 'websocket.send' or 'websocket.close',"
" but got '%s'."
)
raise RuntimeError(msg % message_type)
else:
msg = "Unexpected ASGI message '%s', after sending 'websocket.close'."
raise RuntimeError(msg % message_type)
|
https://github.com/encode/uvicorn/issues/244
|
Traceback (most recent call last):
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/uvicorn/protocols/websockets/websockets_impl.py", line 140, in run_asgi
result = await asgi(self.asgi_receive, self.asgi_send)
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/sessions.py", line 179, in __call__
return await self.inner(receive, self.send)
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/middleware.py", line 41, in coroutine_call
await inner_instance(receive, send)
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/consumer.py", line 59, in __call__
[receive, self.channel_receive], self.dispatch
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/utils.py", line 59, in await_many_dispatch
await task
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/utils.py", line 51, in await_many_dispatch
result = task.result()
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/uvicorn/protocols/websockets/websockets_impl.py", line 222, in asgi_receive
data = await self.recv()
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/websockets/protocol.py", line 419, in recv
return_when=asyncio.FIRST_COMPLETED,
File "/usr/lib/python3.7/asyncio/tasks.py", line 361, in wait
fs = {ensure_future(f, loop=loop) for f in set(fs)}
File "/usr/lib/python3.7/asyncio/tasks.py", line 361, in <setcomp>
fs = {ensure_future(f, loop=loop) for f in set(fs)}
File "/usr/lib/python3.7/asyncio/tasks.py", line 592, in ensure_future
raise TypeError('An asyncio.Future, a coroutine or an awaitable is '
TypeError: An asyncio.Future, a coroutine or an awaitable is required
|
TypeError
|
async def asgi_send(self, message):
message_type = message["type"]
if not self.handshake_started_event.is_set():
if message_type == "websocket.accept":
self.logger.info(
'%s - "WebSocket %s" [accepted]',
self.scope["client"],
self.scope["root_path"] + self.scope["path"],
)
self.initial_response = None
self.accepted_subprotocol = message.get("subprotocol")
self.handshake_started_event.set()
elif message_type == "websocket.close":
self.logger.info(
'%s - "WebSocket %s" 403',
self.scope["client"],
self.scope["root_path"] + self.scope["path"],
)
self.initial_response = (http.HTTPStatus.FORBIDDEN, [], b"")
self.handshake_started_event.set()
self.closed_event.set()
else:
msg = (
"Expected ASGI message 'websocket.accept' or 'websocket.close', "
"but got '%s'."
)
raise RuntimeError(msg % message_type)
elif not self.closed_event.is_set():
await self.handshake_completed_event.wait()
if message_type == "websocket.send":
bytes_data = message.get("bytes")
text_data = message.get("text")
data = text_data if bytes_data is None else bytes_data
await self.send(data)
elif message_type == "websocket.close":
code = message.get("code", 1000)
await self.close(code)
self.closed_event.set()
else:
msg = (
"Expected ASGI message 'websocket.send' or 'websocket.close',"
" but got '%s'."
)
raise RuntimeError(msg % message_type)
else:
msg = "Unexpected ASGI message '%s', after sending 'websocket.close'."
raise RuntimeError(msg % message_type)
|
async def asgi_send(self, message):
message_type = message["type"]
if not self.handshake_started_event.is_set():
if message_type == "websocket.accept":
self.logger.info(
'%s - "WebSocket %s" [accepted]',
self.scope["client"],
self.scope["root_path"] + self.scope["path"],
)
self.initial_response = None
self.accepted_subprotocol = message.get("subprotocol")
self.handshake_started_event.set()
elif message_type == "websocket.close":
self.logger.info(
'%s - "WebSocket %s" 403',
self.scope["client"],
self.scope["root_path"] + self.scope["path"],
)
self.initial_response = (http.HTTPStatus.FORBIDDEN, [], b"")
self.handshake_started_event.set()
self.closed_event.set()
else:
msg = (
"Expected ASGI message 'websocket.accept' or 'websocket.close', "
"but got '%s'."
)
raise RuntimeError(msg % message_type)
elif not self.closed_event.is_set():
await self.handshake_completed_event.wait()
if message_type == "websocket.send":
bytes_data = message.get("bytes")
text_data = message.get("text")
data = text_data if bytes_data is None else bytes_data
await self.send(data)
elif message_type == "websocket.close":
code = message.get("code", 1000)
self.close_code = code # for WebSocketServerProtocol
await self.close(code)
self.closed_event.set()
else:
msg = (
"Expected ASGI message 'websocket.send' or 'websocket.close',"
" but got '%s'."
)
raise RuntimeError(msg % message_type)
else:
msg = "Unexpected ASGI message '%s', after sending 'websocket.close'."
raise RuntimeError(msg % message_type)
|
https://github.com/encode/uvicorn/issues/244
|
Traceback (most recent call last):
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/uvicorn/protocols/websockets/websockets_impl.py", line 140, in run_asgi
result = await asgi(self.asgi_receive, self.asgi_send)
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/sessions.py", line 179, in __call__
return await self.inner(receive, self.send)
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/middleware.py", line 41, in coroutine_call
await inner_instance(receive, send)
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/consumer.py", line 59, in __call__
[receive, self.channel_receive], self.dispatch
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/utils.py", line 59, in await_many_dispatch
await task
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/utils.py", line 51, in await_many_dispatch
result = task.result()
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/uvicorn/protocols/websockets/websockets_impl.py", line 222, in asgi_receive
data = await self.recv()
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/websockets/protocol.py", line 419, in recv
return_when=asyncio.FIRST_COMPLETED,
File "/usr/lib/python3.7/asyncio/tasks.py", line 361, in wait
fs = {ensure_future(f, loop=loop) for f in set(fs)}
File "/usr/lib/python3.7/asyncio/tasks.py", line 361, in <setcomp>
fs = {ensure_future(f, loop=loop) for f in set(fs)}
File "/usr/lib/python3.7/asyncio/tasks.py", line 592, in ensure_future
raise TypeError('An asyncio.Future, a coroutine or an awaitable is '
TypeError: An asyncio.Future, a coroutine or an awaitable is required
|
TypeError
|
async def asgi_receive(self):
if not self.connect_sent:
self.connect_sent = True
return {"type": "websocket.connect"}
await self.handshake_completed_event.wait()
try:
await self.ensure_open()
data = await self.recv()
except websockets.ConnectionClosed as exc:
return {"type": "websocket.disconnect", "code": exc.code}
msg = {"type": "websocket.receive"}
if isinstance(data, str):
msg["text"] = data
else:
msg["bytes"] = data
return msg
|
async def asgi_receive(self):
if not self.connect_sent:
self.connect_sent = True
return {"type": "websocket.connect"}
await self.handshake_completed_event.wait()
if self.closed_event.is_set():
# If the client disconnected: WebSocketServerProtocol set self.close_code.
# If the handshake failed or the app closed before handshake completion,
# use 1006 Abnormal Closure.
code = getattr(self, "close_code", 1006)
return {"type": "websocket.disconnect", "code": code}
try:
data = await self.recv()
except websockets.ConnectionClosed as exc:
return {"type": "websocket.disconnect", "code": exc.code}
msg = {"type": "websocket.receive"}
if isinstance(data, str):
msg["text"] = data
else:
msg["bytes"] = data
return msg
|
https://github.com/encode/uvicorn/issues/244
|
Traceback (most recent call last):
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/uvicorn/protocols/websockets/websockets_impl.py", line 140, in run_asgi
result = await asgi(self.asgi_receive, self.asgi_send)
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/sessions.py", line 179, in __call__
return await self.inner(receive, self.send)
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/middleware.py", line 41, in coroutine_call
await inner_instance(receive, send)
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/consumer.py", line 59, in __call__
[receive, self.channel_receive], self.dispatch
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/utils.py", line 59, in await_many_dispatch
await task
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/channels/utils.py", line 51, in await_many_dispatch
result = task.result()
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/uvicorn/protocols/websockets/websockets_impl.py", line 222, in asgi_receive
data = await self.recv()
File "/home/ossi/src/channels-examples/.venv/lib/python3.7/site-packages/websockets/protocol.py", line 419, in recv
return_when=asyncio.FIRST_COMPLETED,
File "/usr/lib/python3.7/asyncio/tasks.py", line 361, in wait
fs = {ensure_future(f, loop=loop) for f in set(fs)}
File "/usr/lib/python3.7/asyncio/tasks.py", line 361, in <setcomp>
fs = {ensure_future(f, loop=loop) for f in set(fs)}
File "/usr/lib/python3.7/asyncio/tasks.py", line 592, in ensure_future
raise TypeError('An asyncio.Future, a coroutine or an awaitable is '
TypeError: An asyncio.Future, a coroutine or an awaitable is required
|
TypeError
|
def connection_lost(self, exc):
if self.access_logs:
self.logger.debug("%s - Disconnected", self.server[0])
if self.cycle and not self.cycle.response_complete:
self.cycle.disconnected = True
if self.conn.our_state != h11.ERROR:
event = h11.ConnectionClosed()
try:
self.conn.send(event)
except h11.LocalProtocolError:
# Premature client disconnect
pass
self.client_event.set()
|
def connection_lost(self, exc):
if self.access_logs:
self.logger.debug("%s - Disconnected", self.server[0])
if self.cycle and self.cycle.more_body:
self.cycle.disconnected = True
if self.conn.our_state != h11.ERROR:
event = h11.ConnectionClosed()
self.conn.send(event)
self.client_event.set()
|
https://github.com/encode/uvicorn/issues/111
|
ERROR: Exception in ASGI application
Traceback (most recent call last):
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/uvicorn/protocols/http/httptools.py", line 196, in run_asgi
result = await asgi(self.receive, self.send)
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/channels/http.py", line 190, in __call__
await self.handle(body)
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/asgiref/sync.py", line 110, in __call__
return await asyncio.wait_for(future, timeout=None)
File "/usr/lib/python3.6/asyncio/tasks.py", line 339, in wait_for
return (yield from fut)
File "/usr/lib/python3.6/concurrent/futures/thread.py", line 56, in run
result = self.fn(*self.args, **self.kwargs)
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/asgiref/sync.py", line 125, in thread_handler
return self.func(*args, **kwargs)
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/channels/http.py", line 229, in handle
self.send(response_message)
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/asgiref/sync.py", line 64, in __call__
return call_result.result()
File "/usr/lib/python3.6/concurrent/futures/_base.py", line 432, in result
return self.__get_result()
File "/usr/lib/python3.6/concurrent/futures/_base.py", line 384, in __get_result
raise self._exception
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/asgiref/sync.py", line 78, in main_wrap
result = await self.awaitable(*args, **kwargs)
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/uvicorn/protocols/http/httptools.py", line 308, in send
protocol.transport.write(body)
File "uvloop/handles/stream.pyx", line 636, in uvloop.loop.UVStream.write
File "uvloop/handles/handle.pyx", line 165, in uvloop.loop.UVHandle._ensure_alive
RuntimeError: unable to perform operation on <TCPTransport closed=True reading=False 0x1a48ad8>; the handler is closed
|
RuntimeError
|
async def send(self, message):
global DEFAULT_HEADERS
protocol = self.protocol
message_type = message["type"]
if self.disconnected:
return
if not protocol.writable:
await protocol.writable_event.wait()
if not self.response_started:
# Sending response status line and headers
if message_type != "http.response.start":
msg = "Expected ASGI message 'http.response.start', but got '%s'."
raise RuntimeError(msg % message_type)
self.response_started = True
status_code = message["status"]
headers = DEFAULT_HEADERS + message.get("headers", [])
if protocol.access_logs:
protocol.logger.info(
'%s - "%s %s HTTP/%s" %d',
self.scope["server"][0],
self.scope["method"],
self.scope["path"],
self.scope["http_version"],
status_code,
)
# Write response status line and headers
reason = STATUS_PHRASES[status_code]
event = h11.Response(status_code=status_code, headers=headers, reason=reason)
output = protocol.conn.send(event)
protocol.transport.write(output)
elif not self.response_complete:
# Sending response body
if message_type != "http.response.body":
msg = "Expected ASGI message 'http.response.body', but got '%s'."
raise RuntimeError(msg % message_type)
body = message.get("body", b"")
more_body = message.get("more_body", False)
# Write response body
event = h11.Data(data=body)
output = protocol.conn.send(event)
protocol.transport.write(output)
# Handle response completion
if not more_body:
self.response_complete = True
event = h11.EndOfMessage()
output = protocol.conn.send(event)
protocol.transport.write(output)
else:
# Response already sent
msg = "Unexpected ASGI message '%s' sent, after response already completed."
raise RuntimeError(msg % message_type)
if protocol.conn.our_state is h11.MUST_CLOSE:
event = h11.ConnectionClosed()
protocol.conn.send(event)
protocol.transport.close()
elif protocol.conn.our_state is h11.DONE and protocol.conn.their_state is h11.DONE:
protocol.resume_reading()
protocol.conn.start_next_cycle()
|
async def send(self, message):
global DEFAULT_HEADERS
protocol = self.protocol
message_type = message["type"]
if not protocol.writable:
await protocol.writable_event.wait()
if not self.response_started:
# Sending response status line and headers
if message_type != "http.response.start":
msg = "Expected ASGI message 'http.response.start', but got '%s'."
raise RuntimeError(msg % message_type)
self.response_started = True
status_code = message["status"]
headers = DEFAULT_HEADERS + message.get("headers", [])
if protocol.access_logs:
protocol.logger.info(
'%s - "%s %s HTTP/%s" %d',
self.scope["server"][0],
self.scope["method"],
self.scope["path"],
self.scope["http_version"],
status_code,
)
# Write response status line and headers
reason = STATUS_PHRASES[status_code]
event = h11.Response(status_code=status_code, headers=headers, reason=reason)
output = protocol.conn.send(event)
protocol.transport.write(output)
elif not self.response_complete:
# Sending response body
if message_type != "http.response.body":
msg = "Expected ASGI message 'http.response.body', but got '%s'."
raise RuntimeError(msg % message_type)
body = message.get("body", b"")
more_body = message.get("more_body", False)
# Write response body
event = h11.Data(data=body)
output = protocol.conn.send(event)
protocol.transport.write(output)
# Handle response completion
if not more_body:
self.response_complete = True
event = h11.EndOfMessage()
output = protocol.conn.send(event)
protocol.transport.write(output)
else:
# Response already sent
msg = "Unexpected ASGI message '%s' sent, after response already completed."
raise RuntimeError(msg % message_type)
if protocol.conn.our_state is h11.MUST_CLOSE:
event = h11.ConnectionClosed()
protocol.conn.send(event)
protocol.transport.close()
elif protocol.conn.our_state is h11.DONE and protocol.conn.their_state is h11.DONE:
protocol.resume_reading()
protocol.conn.start_next_cycle()
|
https://github.com/encode/uvicorn/issues/111
|
ERROR: Exception in ASGI application
Traceback (most recent call last):
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/uvicorn/protocols/http/httptools.py", line 196, in run_asgi
result = await asgi(self.receive, self.send)
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/channels/http.py", line 190, in __call__
await self.handle(body)
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/asgiref/sync.py", line 110, in __call__
return await asyncio.wait_for(future, timeout=None)
File "/usr/lib/python3.6/asyncio/tasks.py", line 339, in wait_for
return (yield from fut)
File "/usr/lib/python3.6/concurrent/futures/thread.py", line 56, in run
result = self.fn(*self.args, **self.kwargs)
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/asgiref/sync.py", line 125, in thread_handler
return self.func(*args, **kwargs)
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/channels/http.py", line 229, in handle
self.send(response_message)
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/asgiref/sync.py", line 64, in __call__
return call_result.result()
File "/usr/lib/python3.6/concurrent/futures/_base.py", line 432, in result
return self.__get_result()
File "/usr/lib/python3.6/concurrent/futures/_base.py", line 384, in __get_result
raise self._exception
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/asgiref/sync.py", line 78, in main_wrap
result = await self.awaitable(*args, **kwargs)
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/uvicorn/protocols/http/httptools.py", line 308, in send
protocol.transport.write(body)
File "uvloop/handles/stream.pyx", line 636, in uvloop.loop.UVStream.write
File "uvloop/handles/handle.pyx", line 165, in uvloop.loop.UVHandle._ensure_alive
RuntimeError: unable to perform operation on <TCPTransport closed=True reading=False 0x1a48ad8>; the handler is closed
|
RuntimeError
|
def connection_lost(self, exc):
if self.access_logs:
self.logger.debug("%s - Disconnected", self.server[0])
if self.cycle and not self.cycle.response_complete:
self.cycle.disconnected = True
self.client_event.set()
|
def connection_lost(self, exc):
if self.access_logs:
self.logger.debug("%s - Disconnected", self.server[0])
if self.cycle and self.cycle.more_body:
self.cycle.disconnected = True
self.client_event.set()
|
https://github.com/encode/uvicorn/issues/111
|
ERROR: Exception in ASGI application
Traceback (most recent call last):
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/uvicorn/protocols/http/httptools.py", line 196, in run_asgi
result = await asgi(self.receive, self.send)
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/channels/http.py", line 190, in __call__
await self.handle(body)
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/asgiref/sync.py", line 110, in __call__
return await asyncio.wait_for(future, timeout=None)
File "/usr/lib/python3.6/asyncio/tasks.py", line 339, in wait_for
return (yield from fut)
File "/usr/lib/python3.6/concurrent/futures/thread.py", line 56, in run
result = self.fn(*self.args, **self.kwargs)
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/asgiref/sync.py", line 125, in thread_handler
return self.func(*args, **kwargs)
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/channels/http.py", line 229, in handle
self.send(response_message)
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/asgiref/sync.py", line 64, in __call__
return call_result.result()
File "/usr/lib/python3.6/concurrent/futures/_base.py", line 432, in result
return self.__get_result()
File "/usr/lib/python3.6/concurrent/futures/_base.py", line 384, in __get_result
raise self._exception
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/asgiref/sync.py", line 78, in main_wrap
result = await self.awaitable(*args, **kwargs)
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/uvicorn/protocols/http/httptools.py", line 308, in send
protocol.transport.write(body)
File "uvloop/handles/stream.pyx", line 636, in uvloop.loop.UVStream.write
File "uvloop/handles/handle.pyx", line 165, in uvloop.loop.UVHandle._ensure_alive
RuntimeError: unable to perform operation on <TCPTransport closed=True reading=False 0x1a48ad8>; the handler is closed
|
RuntimeError
|
async def send(self, message):
protocol = self.protocol
message_type = message["type"]
if self.disconnected:
return
if not protocol.writable:
await protocol.writable_event.wait()
if not self.response_started:
# Sending response status line and headers
if message_type != "http.response.start":
msg = "Expected ASGI message 'http.response.start', but got '%s'."
raise RuntimeError(msg % message_type)
self.response_started = True
status_code = message["status"]
headers = message.get("headers", [])
if protocol.access_logs:
protocol.logger.info(
'%s - "%s %s HTTP/%s" %d',
self.scope["server"][0],
self.scope["method"],
self.scope["path"],
self.scope["http_version"],
status_code,
)
# Write response status line and headers
content = [STATUS_LINE[status_code], DEFAULT_HEADERS]
for name, value in headers:
name = name.lower()
if name == b"content-length" and self.chunked_encoding is None:
self.expected_content_length = int(value.decode())
self.chunked_encoding = False
elif name == b"transfer-encoding" and value.lower() == b"chunked":
self.chunked_encoding = True
elif name == b"connection" and value.lower() == b"close":
self.keep_alive = False
content.extend([name, b": ", value, b"\r\n"])
if self.chunked_encoding is None:
# Neither content-length nor transfer-encoding specified
self.chunked_encoding = True
content.append(b"transfer-encoding: chunked\r\n")
content.append(b"\r\n")
protocol.transport.write(b"".join(content))
elif not self.response_complete:
# Sending response body
if message_type != "http.response.body":
msg = "Expected ASGI message 'http.response.body', but got '%s'."
raise RuntimeError(msg % message_type)
body = message.get("body", b"")
more_body = message.get("more_body", False)
# Write response body
if self.chunked_encoding:
content = [b"%x\r\n" % len(body), body, b"\r\n"]
if not more_body:
content.append(b"0\r\n\r\n")
protocol.transport.write(b"".join(content))
else:
num_bytes = len(body)
if num_bytes > self.expected_content_length:
raise RuntimeError("Response content longer than Content-Length")
else:
self.expected_content_length -= num_bytes
protocol.transport.write(body)
# Handle response completion
if not more_body:
if self.expected_content_length != 0:
raise RuntimeError("Response content shorter than Content-Length")
self.response_complete = True
if not self.keep_alive:
protocol.transport.close()
else:
protocol.resume_reading()
else:
# Response already sent
msg = "Unexpected ASGI message '%s' sent, after response already completed."
raise RuntimeError(msg % message_type)
|
async def send(self, message):
protocol = self.protocol
message_type = message["type"]
if not protocol.writable:
await protocol.writable_event.wait()
if not self.response_started:
# Sending response status line and headers
if message_type != "http.response.start":
msg = "Expected ASGI message 'http.response.start', but got '%s'."
raise RuntimeError(msg % message_type)
self.response_started = True
status_code = message["status"]
headers = message.get("headers", [])
if protocol.access_logs:
protocol.logger.info(
'%s - "%s %s HTTP/%s" %d',
self.scope["server"][0],
self.scope["method"],
self.scope["path"],
self.scope["http_version"],
status_code,
)
# Write response status line and headers
content = [STATUS_LINE[status_code], DEFAULT_HEADERS]
for name, value in headers:
name = name.lower()
if name == b"content-length" and self.chunked_encoding is None:
self.expected_content_length = int(value.decode())
self.chunked_encoding = False
elif name == b"transfer-encoding" and value.lower() == b"chunked":
self.chunked_encoding = True
elif name == b"connection" and value.lower() == b"close":
self.keep_alive = False
content.extend([name, b": ", value, b"\r\n"])
if self.chunked_encoding is None:
# Neither content-length nor transfer-encoding specified
self.chunked_encoding = True
content.append(b"transfer-encoding: chunked\r\n")
content.append(b"\r\n")
protocol.transport.write(b"".join(content))
elif not self.response_complete:
# Sending response body
if message_type != "http.response.body":
msg = "Expected ASGI message 'http.response.body', but got '%s'."
raise RuntimeError(msg % message_type)
body = message.get("body", b"")
more_body = message.get("more_body", False)
# Write response body
if self.chunked_encoding:
content = [b"%x\r\n" % len(body), body, b"\r\n"]
if not more_body:
content.append(b"0\r\n\r\n")
protocol.transport.write(b"".join(content))
else:
num_bytes = len(body)
if num_bytes > self.expected_content_length:
raise RuntimeError("Response content longer than Content-Length")
else:
self.expected_content_length -= num_bytes
protocol.transport.write(body)
# Handle response completion
if not more_body:
if self.expected_content_length != 0:
raise RuntimeError("Response content shorter than Content-Length")
self.response_complete = True
if not self.keep_alive:
protocol.transport.close()
else:
protocol.resume_reading()
else:
# Response already sent
msg = "Unexpected ASGI message '%s' sent, after response already completed."
raise RuntimeError(msg % message_type)
|
https://github.com/encode/uvicorn/issues/111
|
ERROR: Exception in ASGI application
Traceback (most recent call last):
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/uvicorn/protocols/http/httptools.py", line 196, in run_asgi
result = await asgi(self.receive, self.send)
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/channels/http.py", line 190, in __call__
await self.handle(body)
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/asgiref/sync.py", line 110, in __call__
return await asyncio.wait_for(future, timeout=None)
File "/usr/lib/python3.6/asyncio/tasks.py", line 339, in wait_for
return (yield from fut)
File "/usr/lib/python3.6/concurrent/futures/thread.py", line 56, in run
result = self.fn(*self.args, **self.kwargs)
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/asgiref/sync.py", line 125, in thread_handler
return self.func(*args, **kwargs)
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/channels/http.py", line 229, in handle
self.send(response_message)
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/asgiref/sync.py", line 64, in __call__
return call_result.result()
File "/usr/lib/python3.6/concurrent/futures/_base.py", line 432, in result
return self.__get_result()
File "/usr/lib/python3.6/concurrent/futures/_base.py", line 384, in __get_result
raise self._exception
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/asgiref/sync.py", line 78, in main_wrap
result = await self.awaitable(*args, **kwargs)
File "/home/chillar/.virtualenvs/library/lib/python3.6/site-packages/uvicorn/protocols/http/httptools.py", line 308, in send
protocol.transport.write(body)
File "uvloop/handles/stream.pyx", line 636, in uvloop.loop.UVStream.write
File "uvloop/handles/handle.pyx", line 165, in uvloop.loop.UVHandle._ensure_alive
RuntimeError: unable to perform operation on <TCPTransport closed=True reading=False 0x1a48ad8>; the handler is closed
|
RuntimeError
|
def _get_server_start_message(is_ipv6_message: bool = False) -> Tuple[str, str]:
if is_ipv6_message:
ip_repr = "%s://[%s]:%d"
else:
ip_repr = "%s://%s:%d"
message = f"Uvicorn running on {ip_repr} (Press CTRL+C to quit)"
color_message = (
"Uvicorn running on "
+ click.style(ip_repr, bold=True)
+ " (Press CTRL+C to quit)"
)
return message, color_message
|
def _get_server_start_message(
host_ip_version: _IPKind = _IPKind.IPv4,
) -> Tuple[str, str]:
if host_ip_version is _IPKind.IPv6:
ip_repr = "%s://[%s]:%d"
else:
ip_repr = "%s://%s:%d"
message = f"Uvicorn running on {ip_repr} (Press CTRL+C to quit)"
color_message = (
"Uvicorn running on "
+ click.style(ip_repr, bold=True)
+ " (Press CTRL+C to quit)"
)
return message, color_message
|
https://github.com/encode/uvicorn/issues/825
|
.py thon/debugpy/launcher 61232 -- /Users/paulafernandez/Sandbox/test/test/start
INFO: Started server process [43171]
INFO: Waiting for application startup.
INFO: ASGI 'lifespan' protocol appears unsupported.
INFO: Application startup complete.
Traceback (most recent call last):
File "/usr/local/Cellar/python@3.8/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/usr/local/Cellar/python@3.8/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/Users/paulafernandez/.vscode/extensions/ms-python.python-2020.9.114305/pythonFiles/lib/python/debugpy/__main__.py", line 45, in <module>
cli.main()
File "/Users/paulafernandez/.vscode/extensions/ms-python.python-2020.9.114305/pythonFiles/lib/python/debugpy/../debugpy/server/cli.py", line 430, in main
run()
File "/Users/paulafernandez/.vscode/extensions/ms-python.python-2020.9.114305/pythonFiles/lib/python/debugpy/../debugpy/server/cli.py", line 267, in run_file
runpy.run_path(options.target, run_name=compat.force_str("__main__"))
File "/usr/local/Cellar/python@3.8/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/runpy.py", line 265, in run_path
return _run_module_code(code, init_globals, run_name,
File "/usr/local/Cellar/python@3.8/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/runpy.py", line 97, in _run_module_code
_run_code(code, mod_globals, init_globals,
File "/usr/local/Cellar/python@3.8/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/Users/paulafernandez/Sandbox/test/test/start.py", line 18, in <module>
uvicorn.run(
File "/Users/paulafernandez/Library/Caches/pypoetry/virtualenvs/test-8kdwd4GS-py3.8/lib/python3.8/site-packages/uvicorn/main.py", line 391, in run
server.run()
File "/Users/paulafernandez/Library/Caches/pypoetry/virtualenvs/test-8kdwd4GS-py3.8/lib/python3.8/site-packages/uvicorn/main.py", line 419, in run
loop.run_until_complete(self.serve(sockets=sockets))
File "/usr/local/Cellar/python@3.8/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/asyncio/base_events.py", line 616, in run_until_complete
return future.result()
File "/Users/paulafernandez/Library/Caches/pypoetry/virtualenvs/test-8kdwd4GS-py3.8/lib/python3.8/site-packages/uvicorn/main.py", line 436, in serve
await self.startup(sockets=sockets)
File "/Users/paulafernandez/Library/Caches/pypoetry/virtualenvs/test-8kdwd4GS-py3.8/lib/python3.8/site-packages/uvicorn/main.py", line 526, in startup
if isinstance(ip_address(config.host), IPv6Address):
File "/usr/local/Cellar/python@3.8/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/ipaddress.py", line 53, in ip_address
raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
ValueError: 'localhost' does not appear to be an IPv4 or IPv6 address
|
ValueError
|
def bind_socket(self):
family, sockettype, proto, canonname, sockaddr = socket.getaddrinfo(
self.host, self.port, type=socket.SOCK_STREAM
)[0]
sock = socket.socket(family=family, type=sockettype)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
sock.bind((self.host, self.port))
except OSError as exc:
logger.error(exc)
sys.exit(1)
sock.set_inheritable(True)
if family == socket.AddressFamily.AF_INET6:
message, color_message = _get_server_start_message(is_ipv6_message=True)
else:
message, color_message = _get_server_start_message()
protocol_name = "https" if self.is_ssl else "http"
logger.info(
message,
protocol_name,
self.host,
self.port,
extra={"color_message": color_message},
)
return sock
|
def bind_socket(self):
family, sockettype, proto, canonname, sockaddr = socket.getaddrinfo(
self.host, self.port, type=socket.SOCK_STREAM
)[0]
sock = socket.socket(family=family, type=sockettype)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
sock.bind((self.host, self.port))
except OSError as exc:
logger.error(exc)
sys.exit(1)
sock.set_inheritable(True)
if family == socket.AddressFamily.AF_INET6:
message, color_message = _get_server_start_message(_IPKind.IPv6)
else:
message, color_message = _get_server_start_message(_IPKind.IPv4)
protocol_name = "https" if self.is_ssl else "http"
logger.info(
message,
protocol_name,
self.host,
self.port,
extra={"color_message": color_message},
)
return sock
|
https://github.com/encode/uvicorn/issues/825
|
.py thon/debugpy/launcher 61232 -- /Users/paulafernandez/Sandbox/test/test/start
INFO: Started server process [43171]
INFO: Waiting for application startup.
INFO: ASGI 'lifespan' protocol appears unsupported.
INFO: Application startup complete.
Traceback (most recent call last):
File "/usr/local/Cellar/python@3.8/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/usr/local/Cellar/python@3.8/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/Users/paulafernandez/.vscode/extensions/ms-python.python-2020.9.114305/pythonFiles/lib/python/debugpy/__main__.py", line 45, in <module>
cli.main()
File "/Users/paulafernandez/.vscode/extensions/ms-python.python-2020.9.114305/pythonFiles/lib/python/debugpy/../debugpy/server/cli.py", line 430, in main
run()
File "/Users/paulafernandez/.vscode/extensions/ms-python.python-2020.9.114305/pythonFiles/lib/python/debugpy/../debugpy/server/cli.py", line 267, in run_file
runpy.run_path(options.target, run_name=compat.force_str("__main__"))
File "/usr/local/Cellar/python@3.8/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/runpy.py", line 265, in run_path
return _run_module_code(code, init_globals, run_name,
File "/usr/local/Cellar/python@3.8/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/runpy.py", line 97, in _run_module_code
_run_code(code, mod_globals, init_globals,
File "/usr/local/Cellar/python@3.8/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/Users/paulafernandez/Sandbox/test/test/start.py", line 18, in <module>
uvicorn.run(
File "/Users/paulafernandez/Library/Caches/pypoetry/virtualenvs/test-8kdwd4GS-py3.8/lib/python3.8/site-packages/uvicorn/main.py", line 391, in run
server.run()
File "/Users/paulafernandez/Library/Caches/pypoetry/virtualenvs/test-8kdwd4GS-py3.8/lib/python3.8/site-packages/uvicorn/main.py", line 419, in run
loop.run_until_complete(self.serve(sockets=sockets))
File "/usr/local/Cellar/python@3.8/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/asyncio/base_events.py", line 616, in run_until_complete
return future.result()
File "/Users/paulafernandez/Library/Caches/pypoetry/virtualenvs/test-8kdwd4GS-py3.8/lib/python3.8/site-packages/uvicorn/main.py", line 436, in serve
await self.startup(sockets=sockets)
File "/Users/paulafernandez/Library/Caches/pypoetry/virtualenvs/test-8kdwd4GS-py3.8/lib/python3.8/site-packages/uvicorn/main.py", line 526, in startup
if isinstance(ip_address(config.host), IPv6Address):
File "/usr/local/Cellar/python@3.8/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/ipaddress.py", line 53, in ip_address
raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
ValueError: 'localhost' does not appear to be an IPv4 or IPv6 address
|
ValueError
|
async def startup(self, sockets=None):
await self.lifespan.startup()
if self.lifespan.should_exit:
self.should_exit = True
return
config = self.config
create_protocol = functools.partial(
config.http_protocol_class, config=config, server_state=self.server_state
)
loop = asyncio.get_event_loop()
if sockets is not None:
# Explicitly passed a list of open sockets.
# We use this when the server is run from a Gunicorn worker.
def _share_socket(sock: socket) -> socket:
# Windows requires the socket be explicitly shared across
# multiple workers (processes).
from socket import fromshare # type: ignore
sock_data = sock.share(os.getpid()) # type: ignore
return fromshare(sock_data)
self.servers = []
for sock in sockets:
if config.workers > 1 and platform.system() == "Windows":
sock = _share_socket(sock)
server = await loop.create_server(
create_protocol, sock=sock, ssl=config.ssl, backlog=config.backlog
)
self.servers.append(server)
elif config.fd is not None:
# Use an existing socket, from a file descriptor.
sock = socket.fromfd(config.fd, socket.AF_UNIX, socket.SOCK_STREAM)
server = await loop.create_server(
create_protocol, sock=sock, ssl=config.ssl, backlog=config.backlog
)
message = "Uvicorn running on socket %s (Press CTRL+C to quit)"
logger.info(message % str(sock.getsockname()))
self.servers = [server]
elif config.uds is not None:
# Create a socket using UNIX domain socket.
uds_perms = 0o666
if os.path.exists(config.uds):
uds_perms = os.stat(config.uds).st_mode
server = await loop.create_unix_server(
create_protocol, path=config.uds, ssl=config.ssl, backlog=config.backlog
)
os.chmod(config.uds, uds_perms)
message = "Uvicorn running on unix socket %s (Press CTRL+C to quit)"
logger.info(message % config.uds)
self.servers = [server]
else:
# Standard case. Create a socket from a host/port pair.
try:
server = await loop.create_server(
create_protocol,
host=config.host,
port=config.port,
ssl=config.ssl,
backlog=config.backlog,
)
except OSError as exc:
logger.error(exc)
await self.lifespan.shutdown()
sys.exit(1)
port = config.port
if port == 0:
port = server.sockets[0].getsockname()[1]
protocol_name = "https" if config.ssl else "http"
try:
addr = ip_address(config.host)
if isinstance(addr, IPv6Address):
message, color_message = _get_server_start_message(is_ipv6_message=True)
elif isinstance(addr, IPv4Address):
message, color_message = _get_server_start_message()
except ValueError:
message, color_message = _get_server_start_message()
logger.info(
message,
protocol_name,
config.host,
port,
extra={"color_message": color_message},
)
self.servers = [server]
self.started = True
|
async def startup(self, sockets=None):
await self.lifespan.startup()
if self.lifespan.should_exit:
self.should_exit = True
return
config = self.config
create_protocol = functools.partial(
config.http_protocol_class, config=config, server_state=self.server_state
)
loop = asyncio.get_event_loop()
if sockets is not None:
# Explicitly passed a list of open sockets.
# We use this when the server is run from a Gunicorn worker.
def _share_socket(sock: socket) -> socket:
# Windows requires the socket be explicitly shared across
# multiple workers (processes).
from socket import fromshare # type: ignore
sock_data = sock.share(os.getpid()) # type: ignore
return fromshare(sock_data)
self.servers = []
for sock in sockets:
if config.workers > 1 and platform.system() == "Windows":
sock = _share_socket(sock)
server = await loop.create_server(
create_protocol, sock=sock, ssl=config.ssl, backlog=config.backlog
)
self.servers.append(server)
elif config.fd is not None:
# Use an existing socket, from a file descriptor.
sock = socket.fromfd(config.fd, socket.AF_UNIX, socket.SOCK_STREAM)
server = await loop.create_server(
create_protocol, sock=sock, ssl=config.ssl, backlog=config.backlog
)
message = "Uvicorn running on socket %s (Press CTRL+C to quit)"
logger.info(message % str(sock.getsockname()))
self.servers = [server]
elif config.uds is not None:
# Create a socket using UNIX domain socket.
uds_perms = 0o666
if os.path.exists(config.uds):
uds_perms = os.stat(config.uds).st_mode
server = await loop.create_unix_server(
create_protocol, path=config.uds, ssl=config.ssl, backlog=config.backlog
)
os.chmod(config.uds, uds_perms)
message = "Uvicorn running on unix socket %s (Press CTRL+C to quit)"
logger.info(message % config.uds)
self.servers = [server]
else:
# Standard case. Create a socket from a host/port pair.
try:
server = await loop.create_server(
create_protocol,
host=config.host,
port=config.port,
ssl=config.ssl,
backlog=config.backlog,
)
except OSError as exc:
logger.error(exc)
await self.lifespan.shutdown()
sys.exit(1)
port = config.port
if port == 0:
port = server.sockets[0].getsockname()[1]
protocol_name = "https" if config.ssl else "http"
if isinstance(ip_address(config.host), IPv6Address):
message, color_message = _get_server_start_message(_IPKind.IPv6)
else:
message, color_message = _get_server_start_message(_IPKind.IPv4)
logger.info(
message,
protocol_name,
config.host,
port,
extra={"color_message": color_message},
)
self.servers = [server]
self.started = True
|
https://github.com/encode/uvicorn/issues/825
|
.py thon/debugpy/launcher 61232 -- /Users/paulafernandez/Sandbox/test/test/start
INFO: Started server process [43171]
INFO: Waiting for application startup.
INFO: ASGI 'lifespan' protocol appears unsupported.
INFO: Application startup complete.
Traceback (most recent call last):
File "/usr/local/Cellar/python@3.8/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/usr/local/Cellar/python@3.8/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/Users/paulafernandez/.vscode/extensions/ms-python.python-2020.9.114305/pythonFiles/lib/python/debugpy/__main__.py", line 45, in <module>
cli.main()
File "/Users/paulafernandez/.vscode/extensions/ms-python.python-2020.9.114305/pythonFiles/lib/python/debugpy/../debugpy/server/cli.py", line 430, in main
run()
File "/Users/paulafernandez/.vscode/extensions/ms-python.python-2020.9.114305/pythonFiles/lib/python/debugpy/../debugpy/server/cli.py", line 267, in run_file
runpy.run_path(options.target, run_name=compat.force_str("__main__"))
File "/usr/local/Cellar/python@3.8/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/runpy.py", line 265, in run_path
return _run_module_code(code, init_globals, run_name,
File "/usr/local/Cellar/python@3.8/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/runpy.py", line 97, in _run_module_code
_run_code(code, mod_globals, init_globals,
File "/usr/local/Cellar/python@3.8/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/Users/paulafernandez/Sandbox/test/test/start.py", line 18, in <module>
uvicorn.run(
File "/Users/paulafernandez/Library/Caches/pypoetry/virtualenvs/test-8kdwd4GS-py3.8/lib/python3.8/site-packages/uvicorn/main.py", line 391, in run
server.run()
File "/Users/paulafernandez/Library/Caches/pypoetry/virtualenvs/test-8kdwd4GS-py3.8/lib/python3.8/site-packages/uvicorn/main.py", line 419, in run
loop.run_until_complete(self.serve(sockets=sockets))
File "/usr/local/Cellar/python@3.8/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/asyncio/base_events.py", line 616, in run_until_complete
return future.result()
File "/Users/paulafernandez/Library/Caches/pypoetry/virtualenvs/test-8kdwd4GS-py3.8/lib/python3.8/site-packages/uvicorn/main.py", line 436, in serve
await self.startup(sockets=sockets)
File "/Users/paulafernandez/Library/Caches/pypoetry/virtualenvs/test-8kdwd4GS-py3.8/lib/python3.8/site-packages/uvicorn/main.py", line 526, in startup
if isinstance(ip_address(config.host), IPv6Address):
File "/usr/local/Cellar/python@3.8/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/ipaddress.py", line 53, in ip_address
raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
ValueError: 'localhost' does not appear to be an IPv4 or IPv6 address
|
ValueError
|
async def startup(self, sockets=None):
await self.lifespan.startup()
if self.lifespan.should_exit:
self.should_exit = True
return
config = self.config
create_protocol = functools.partial(
config.http_protocol_class, config=config, server_state=self.server_state
)
loop = asyncio.get_event_loop()
if sockets is not None:
# Explicitly passed a list of open sockets.
# We use this when the server is run from a Gunicorn worker.
def _share_socket(sock: socket) -> socket:
# Windows requires the socket be explicitly shared across
# multiple workers (processes).
from socket import fromshare # type: ignore
sock_data = sock.share(os.getpid()) # type: ignore
return fromshare(sock_data)
self.servers = []
for sock in sockets:
if config.workers > 1 and platform.system() == "Windows":
sock = _share_socket(sock)
server = await loop.create_server(
create_protocol, sock=sock, ssl=config.ssl, backlog=config.backlog
)
self.servers.append(server)
elif config.fd is not None:
# Use an existing socket, from a file descriptor.
sock = socket.fromfd(config.fd, socket.AF_UNIX, socket.SOCK_STREAM)
server = await loop.create_server(
create_protocol, sock=sock, ssl=config.ssl, backlog=config.backlog
)
message = "Uvicorn running on socket %s (Press CTRL+C to quit)"
logger.info(message % str(sock.getsockname()))
self.servers = [server]
elif config.uds is not None:
# Create a socket using UNIX domain socket.
uds_perms = 0o666
if os.path.exists(config.uds):
uds_perms = os.stat(config.uds).st_mode
server = await loop.create_unix_server(
create_protocol, path=config.uds, ssl=config.ssl, backlog=config.backlog
)
os.chmod(config.uds, uds_perms)
message = "Uvicorn running on unix socket %s (Press CTRL+C to quit)"
logger.info(message % config.uds)
self.servers = [server]
else:
# Standard case. Create a socket from a host/port pair.
try:
server = await loop.create_server(
create_protocol,
host=config.host,
port=config.port,
ssl=config.ssl,
backlog=config.backlog,
)
except OSError as exc:
logger.error(exc)
await self.lifespan.shutdown()
sys.exit(1)
port = config.port
if port == 0:
port = server.sockets[0].getsockname()[1]
protocol_name = "https" if config.ssl else "http"
message = "Uvicorn running on %s://%s:%d (Press CTRL+C to quit)"
color_message = (
"Uvicorn running on "
+ click.style("%s://%s:%d", bold=True)
+ " (Press CTRL+C to quit)"
)
logger.info(
message,
protocol_name,
config.host,
port,
extra={"color_message": color_message},
)
self.servers = [server]
self.started = True
|
async def startup(self, sockets=None):
await self.lifespan.startup()
if self.lifespan.should_exit:
self.should_exit = True
return
config = self.config
create_protocol = functools.partial(
config.http_protocol_class, config=config, server_state=self.server_state
)
loop = asyncio.get_event_loop()
if sockets is not None:
# Explicitly passed a list of open sockets.
# We use this when the server is run from a Gunicorn worker.
self.servers = []
for sock in sockets:
server = await loop.create_server(
create_protocol, sock=sock, ssl=config.ssl, backlog=config.backlog
)
self.servers.append(server)
elif config.fd is not None:
# Use an existing socket, from a file descriptor.
sock = socket.fromfd(config.fd, socket.AF_UNIX, socket.SOCK_STREAM)
server = await loop.create_server(
create_protocol, sock=sock, ssl=config.ssl, backlog=config.backlog
)
message = "Uvicorn running on socket %s (Press CTRL+C to quit)"
logger.info(message % str(sock.getsockname()))
self.servers = [server]
elif config.uds is not None:
# Create a socket using UNIX domain socket.
uds_perms = 0o666
if os.path.exists(config.uds):
uds_perms = os.stat(config.uds).st_mode
server = await loop.create_unix_server(
create_protocol, path=config.uds, ssl=config.ssl, backlog=config.backlog
)
os.chmod(config.uds, uds_perms)
message = "Uvicorn running on unix socket %s (Press CTRL+C to quit)"
logger.info(message % config.uds)
self.servers = [server]
else:
# Standard case. Create a socket from a host/port pair.
try:
server = await loop.create_server(
create_protocol,
host=config.host,
port=config.port,
ssl=config.ssl,
backlog=config.backlog,
)
except OSError as exc:
logger.error(exc)
await self.lifespan.shutdown()
sys.exit(1)
port = config.port
if port == 0:
port = server.sockets[0].getsockname()[1]
protocol_name = "https" if config.ssl else "http"
message = "Uvicorn running on %s://%s:%d (Press CTRL+C to quit)"
color_message = (
"Uvicorn running on "
+ click.style("%s://%s:%d", bold=True)
+ " (Press CTRL+C to quit)"
)
logger.info(
message,
protocol_name,
config.host,
port,
extra={"color_message": color_message},
)
self.servers = [server]
self.started = True
|
https://github.com/encode/uvicorn/issues/683
|
INFO: Uvicorn running on http://127.0.0.1:9000 (Press CTRL+C to quit)
INFO: Started parent process [17696]
INFO: Started server process [20428]
INFO: Started server process [15476]
INFO: Waiting for application startup.
INFO: Waiting for application startup.
INFO: ASGI 'lifespan' protocol appears unsupported.
INFO: ASGI 'lifespan' protocol appears unsupported.
INFO: Application startup complete.
INFO: Application startup complete.
Process SpawnProcess-2:
Traceback (most recent call last):
File "c:\users\arnab\appdata\local\programs\python\python37-32\Lib\multiprocessing\process.py", line 297, in _bootstrap
self.run()
File "c:\users\arnab\appdata\local\programs\python\python37-32\Lib\multiprocessing\process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "t:\my projects\scholify-web\env\lib\site-packages\uvicorn\subprocess.py", line 62, in subprocess_started
target(sockets=sockets)
File "t:\my projects\scholify-web\env\lib\site-packages\uvicorn\main.py", line 382, in run
loop.run_until_complete(self.serve(sockets=sockets))
File "c:\users\arnab\appdata\local\programs\python\python37-32\Lib\asyncio\base_events.py", line 579, in run_until_complete
return future.result()
File "t:\my projects\scholify-web\env\lib\site-packages\uvicorn\main.py", line 399, in serve
await self.startup(sockets=sockets)
File "t:\my projects\scholify-web\env\lib\site-packages\uvicorn\main.py", line 433, in startup
create_protocol, sock=sock, ssl=config.ssl, backlog=config.backlog
File "c:\users\arnab\appdata\local\programs\python\python37-32\Lib\asyncio\base_events.py", line 1393, in create_server
server._start_serving()
File "c:\users\arnab\appdata\local\programs\python\python37-32\Lib\asyncio\base_events.py", line 282, in _start_serving
sock.listen(self._backlog)
OSError: [WinError 10022] An invalid argument was supplied
|
OSError
|
def data_received(self, data):
try:
self.parser.feed_data(data)
except httptools.parser.errors.HttpParserError:
msg = "Invalid HTTP request received."
self.logger.warn(msg)
self.transport.close()
except httptools.HttpParserUpgrade:
websocket_upgrade(self)
|
def data_received(self, data):
try:
self.parser.feed_data(data)
except httptools.HttpParserUpgrade:
websocket_upgrade(self)
|
https://github.com/encode/uvicorn/issues/75
|
uvicorn[7423]: Unhandled exception in event loop
uvicorn[7423]: Traceback (most recent call last):
uvicorn[7423]: File "uvloop/handles/stream.pyx", line 784, in uvloop.loop.__uv_stream_on_read_impl
uvicorn[7423]: File "uvloop/handles/stream.pyx", line 563, in uvloop.loop.UVStream._on_read
uvicorn[7423]: File "/home/web/.virtualenvs/mesh/lib/python3.5/site-packages/uvicorn/protocols/http.py", line 212, in data_received
uvicorn[7423]: self.request_parser.feed_data(data)
uvicorn[7423]: File "httptools/parser/parser.pyx", line 193, in httptools.parser.parser.HttpParser.feed_data
uvicorn[7423]: httptools.parser.errors.HttpParserInvalidMethodError: invalid HTTP method
|
httptools.parser.errors.HttpParserInvalidMethodError
|
def analyze(
problem,
X,
Y,
num_resamples=1000,
conf_level=0.95,
print_to_console=False,
grid_jump=2,
num_levels=4,
):
"""Perform Morris Analysis on model outputs.
Returns a dictionary with keys 'mu', 'mu_star', 'sigma', and
'mu_star_conf', where each entry is a list of parameters containing
the indices in the same order as the parameter file.
Arguments
---------
problem : dict
The problem definition
X : numpy.matrix
The NumPy matrix containing the model inputs of dtype=float
Y : numpy.array
The NumPy array containing the model outputs of dtype=float
num_resamples : int
The number of resamples used to compute the confidence
intervals (default 1000)
conf_level : float
The confidence interval level (default 0.95)
print_to_console : bool
Print results directly to console (default False)
grid_jump : int
The grid jump size, must be identical to the value passed
to :func:`SALib.sample.morris.sample` (default 2)
num_levels : int
The number of grid levels, must be identical to the value
passed to SALib.sample.morris (default 4)
Returns
-------
Si : dict
A dictionary of sensitivity indices containing the following entries.
- `mu` - the mean elementary effect
- `mu_star` - the absolute of the mean elementary effect
- `sigma` - the standard deviation of the elementary effect
- `mu_star_conf` - the bootstrapped confidence interval
- `names` - the names of the parameters
References
----------
.. [1] Morris, M. (1991). "Factorial Sampling Plans for Preliminary
Computational Experiments." Technometrics, 33(2):161-174,
doi:10.1080/00401706.1991.10484804.
.. [2] Campolongo, F., J. Cariboni, and A. Saltelli (2007). "An effective
screening design for sensitivity analysis of large models."
Environmental Modelling & Software, 22(10):1509-1518,
doi:10.1016/j.envsoft.2006.10.004.
Examples
--------
>>> X = morris.sample(problem, 1000, num_levels=4, grid_jump=2)
>>> Y = Ishigami.evaluate(X)
>>> Si = morris.analyze(problem, X, Y, conf_level=0.95,
>>> print_to_console=True, num_levels=4, grid_jump=2)
"""
msg = "dtype of {} array must be 'float', float32 or float64"
if X.dtype not in ["float", "float32", "float64"]:
raise ValueError(msg.format("X"))
if Y.dtype not in ["float", "float32", "float64"]:
raise ValueError(msg.format("Y"))
# Assume that there are no groups
groups = None
delta = grid_jump / (num_levels - 1)
num_vars = problem["num_vars"]
if (problem.get("groups") is None) & (Y.size % (num_vars + 1) == 0):
num_trajectories = int(Y.size / (num_vars + 1))
elif problem.get("groups") is not None:
groups, unique_group_names = compute_groups_matrix(problem["groups"], num_vars)
number_of_groups = len(unique_group_names)
num_trajectories = int(Y.size / (number_of_groups + 1))
else:
raise ValueError(
"Number of samples in model output file must be"
"a multiple of (D+1), where D is the number of"
"parameters (or groups) in your parameter file."
)
ee = np.zeros((num_vars, num_trajectories))
ee = compute_elementary_effects(X, Y, int(Y.size / num_trajectories), delta)
# Output the Mu, Mu*, and Sigma Values. Also return them in case this is
# being called from Python
Si = dict(
(k, [None] * num_vars)
for k in ["names", "mu", "mu_star", "sigma", "mu_star_conf"]
)
Si["mu"] = np.average(ee, 1)
Si["mu_star"] = np.average(np.abs(ee), 1)
Si["sigma"] = np.std(ee, axis=1, ddof=1)
Si["names"] = problem["names"]
for j in range(num_vars):
Si["mu_star_conf"][j] = compute_mu_star_confidence(
ee[j, :], num_trajectories, num_resamples, conf_level
)
if groups is None:
if print_to_console:
print(
"{0:<30} {1:>10} {2:>10} {3:>15} {4:>10}".format(
"Parameter", "Mu_Star", "Mu", "Mu_Star_Conf", "Sigma"
)
)
for j in list(range(num_vars)):
print(
"{0:30} {1:10.3f} {2:10.3f} {3:15.3f} {4:10.3f}".format(
Si["names"][j],
Si["mu_star"][j],
Si["mu"][j],
Si["mu_star_conf"][j],
Si["sigma"][j],
)
)
return Si
elif groups is not None:
# if there are groups, then the elementary effects returned need to be
# computed over the groups of variables,
# rather than the individual variables
Si_grouped = dict((k, [None] * num_vars) for k in ["mu_star", "mu_star_conf"])
Si_grouped["mu_star"] = compute_grouped_metric(Si["mu_star"], groups)
Si_grouped["mu_star_conf"] = compute_grouped_metric(Si["mu_star_conf"], groups)
Si_grouped["names"] = unique_group_names
Si_grouped["sigma"] = compute_grouped_sigma(Si["sigma"], groups)
Si_grouped["mu"] = compute_grouped_sigma(Si["mu"], groups)
if print_to_console:
print(
"{0:<30} {1:>10} {2:>10} {3:>15} {4:>10}".format(
"Parameter", "Mu_Star", "Mu", "Mu_Star_Conf", "Sigma"
)
)
for j in list(range(number_of_groups)):
print(
"{0:30} {1:10.3f} {2:10.3f} {3:15.3f} {4:10.3f}".format(
Si_grouped["names"][j],
Si_grouped["mu_star"][j],
Si_grouped["mu"][j],
Si_grouped["mu_star_conf"][j],
Si_grouped["sigma"][j],
)
)
return Si_grouped
else:
raise RuntimeError("Could not determine which parameters should be returned")
|
def analyze(
problem,
X,
Y,
num_resamples=1000,
conf_level=0.95,
print_to_console=False,
grid_jump=2,
num_levels=4,
):
"""Perform Morris Analysis on model outputs.
Returns a dictionary with keys 'mu', 'mu_star', 'sigma', and
'mu_star_conf', where each entry is a list of parameters containing
the indices in the same order as the parameter file.
Arguments
---------
problem : dict
The problem definition
X : numpy.matrix
The NumPy matrix containing the model inputs
Y : numpy.array
The NumPy array containing the model outputs
num_resamples : int
The number of resamples used to compute the confidence
intervals (default 1000)
conf_level : float
The confidence interval level (default 0.95)
print_to_console : bool
Print results directly to console (default False)
grid_jump : int
The grid jump size, must be identical to the value passed
to :func:`SALib.sample.morris.sample` (default 2)
num_levels : int
The number of grid levels, must be identical to the value
passed to SALib.sample.morris (default 4)
Returns
-------
Si : dict
A dictionary of sensitivity indices containing the following entries.
- `mu` - the mean elementary effect
- `mu_star` - the absolute of the mean elementary effect
- `sigma` - the standard deviation of the elementary effect
- `mu_star_conf` - the bootstrapped confidence interval
- `names` - the names of the parameters
References
----------
.. [1] Morris, M. (1991). "Factorial Sampling Plans for Preliminary
Computational Experiments." Technometrics, 33(2):161-174,
doi:10.1080/00401706.1991.10484804.
.. [2] Campolongo, F., J. Cariboni, and A. Saltelli (2007). "An effective
screening design for sensitivity analysis of large models."
Environmental Modelling & Software, 22(10):1509-1518,
doi:10.1016/j.envsoft.2006.10.004.
Examples
--------
>>> X = morris.sample(problem, 1000, num_levels=4, grid_jump=2)
>>> Y = Ishigami.evaluate(X)
>>> Si = morris.analyze(problem, X, Y, conf_level=0.95,
>>> print_to_console=True, num_levels=4, grid_jump=2)
"""
# Assume that there are no groups
groups = None
delta = grid_jump / (num_levels - 1)
num_vars = problem["num_vars"]
if (problem.get("groups") is None) & (Y.size % (num_vars + 1) == 0):
num_trajectories = int(Y.size / (num_vars + 1))
elif problem.get("groups") is not None:
groups, unique_group_names = compute_groups_matrix(problem["groups"], num_vars)
number_of_groups = len(unique_group_names)
num_trajectories = int(Y.size / (number_of_groups + 1))
else:
raise ValueError(
"Number of samples in model output file must be"
"a multiple of (D+1), where D is the number of"
"parameters (or groups) in your parameter file."
)
ee = np.zeros((num_vars, num_trajectories))
ee = compute_elementary_effects(X, Y, int(Y.size / num_trajectories), delta)
# Output the Mu, Mu*, and Sigma Values. Also return them in case this is
# being called from Python
Si = dict(
(k, [None] * num_vars)
for k in ["names", "mu", "mu_star", "sigma", "mu_star_conf"]
)
Si["mu"] = np.average(ee, 1)
Si["mu_star"] = np.average(np.abs(ee), 1)
Si["sigma"] = np.std(ee, axis=1, ddof=1)
Si["names"] = problem["names"]
for j in range(num_vars):
Si["mu_star_conf"][j] = compute_mu_star_confidence(
ee[j, :], num_trajectories, num_resamples, conf_level
)
if groups is None:
if print_to_console:
print(
"{0:<30} {1:>10} {2:>10} {3:>15} {4:>10}".format(
"Parameter", "Mu_Star", "Mu", "Mu_Star_Conf", "Sigma"
)
)
for j in list(range(num_vars)):
print(
"{0:30} {1:10.3f} {2:10.3f} {3:15.3f} {4:10.3f}".format(
Si["names"][j],
Si["mu_star"][j],
Si["mu"][j],
Si["mu_star_conf"][j],
Si["sigma"][j],
)
)
return Si
elif groups is not None:
# if there are groups, then the elementary effects returned need to be
# computed over the groups of variables,
# rather than the individual variables
Si_grouped = dict((k, [None] * num_vars) for k in ["mu_star", "mu_star_conf"])
Si_grouped["mu_star"] = compute_grouped_metric(Si["mu_star"], groups)
Si_grouped["mu_star_conf"] = compute_grouped_metric(Si["mu_star_conf"], groups)
Si_grouped["names"] = unique_group_names
Si_grouped["sigma"] = compute_grouped_sigma(Si["sigma"], groups)
Si_grouped["mu"] = compute_grouped_sigma(Si["mu"], groups)
if print_to_console:
print(
"{0:<30} {1:>10} {2:>10} {3:>15} {4:>10}".format(
"Parameter", "Mu_Star", "Mu", "Mu_Star_Conf", "Sigma"
)
)
for j in list(range(number_of_groups)):
print(
"{0:30} {1:10.3f} {2:10.3f} {3:15.3f} {4:10.3f}".format(
Si_grouped["names"][j],
Si_grouped["mu_star"][j],
Si_grouped["mu"][j],
Si_grouped["mu_star_conf"][j],
Si_grouped["sigma"][j],
)
)
return Si_grouped
else:
raise RuntimeError("Could not determine which parameters should be returned")
|
https://github.com/SALib/SALib/issues/153
|
Traceback (most recent call last):
File "postproc.py", line 239, in <module>
postproc(dir)
File "postproc.py", line 54, in postproc
calcDistances(dir, config)
File "postproc.py", line 207, in calcDistances
grid_jump=config['info']['jump'])
File "/home/buck06191/anaconda3/envs/bcmd/lib/python2.7/site-packages/SALib/analyze/morris.py", line 83, in analyze
ee = compute_elementary_effects(X, Y, int(Y.size / num_trajectories), delta)
File "/home/buck06191/anaconda3/envs/bcmd/lib/python2.7/site-packages/SALib/analyze/morris.py", line 233, in compute_elementary_effects
np.divide(ee, delta, out=ee)
TypeError: ufunc 'divide' output (typecode 'd') could not be coerced to provided output parameter (typecode 'l') according to the casting rule ''same_kind''
|
TypeError
|
def sample(problem, N, M=4):
"""Generate model inputs for the Fourier Amplitude Sensitivity Test (FAST).
Returns a NumPy matrix containing the model inputs required by the Fourier
Amplitude sensitivity test. The resulting matrix contains N rows and D
columns, where D is the number of parameters. The samples generated are
intended to be used by :func:`SALib.analyze.fast.analyze`.
Parameters
----------
problem : dict
The problem definition
N : int
The number of samples to generate
M : int
The interference parameter, i.e., the number of harmonics to sum in the
Fourier series decomposition (default 4)
"""
if N < 4 * M**2:
raise ValueError("""
Sample size N > 4M^2 is required. M=4 by default.""")
D = problem["num_vars"]
omega = np.empty([D])
omega[0] = math.floor((N - 1) / (2 * M))
m = math.floor(omega[0] / (2 * M))
if m >= (D - 1):
omega[1:] = np.floor(np.linspace(1, m, D - 1))
else:
omega[1:] = np.arange(D - 1) % m + 1
# Discretization of the frequency space, s
s = (2 * math.pi / N) * np.arange(N)
# Transformation to get points in the X space
X = np.empty([N * D, D])
omega2 = np.empty([D])
for i in range(D):
omega2[i] = omega[0]
idx = list(range(i)) + list(range(i + 1, D))
omega2[idx] = omega[1:]
l = range(i * N, (i + 1) * N)
# random phase shift on [0, 2pi) following Saltelli et al.
# Technometrics 1999
phi = 2 * math.pi * np.random.rand()
for j in range(D):
g = 0.5 + (1 / math.pi) * np.arcsin(np.sin(omega2[j] * s + phi))
X[l, j] = g
scale_samples(X, problem["bounds"])
return X
|
def sample(problem, N, M=4):
"""Generate model inputs for the Fourier Amplitude Sensitivity Test (FAST).
Returns a NumPy matrix containing the model inputs required by the Fourier
Amplitude sensitivity test. The resulting matrix contains N rows and D
columns, where D is the number of parameters. The samples generated are
intended to be used by :func:`SALib.analyze.fast.analyze`.
Parameters
----------
problem : dict
The problem definition
N : int
The number of samples to generate
M : int
The interference parameter, i.e., the number of harmonics to sum in the
Fourier series decomposition (default 4)
"""
D = problem["num_vars"]
omega = np.empty([D])
omega[0] = math.floor((N - 1) / (2 * M))
m = math.floor(omega[0] / (2 * M))
if m >= (D - 1):
omega[1:] = np.floor(np.linspace(1, m, D - 1))
else:
omega[1:] = np.arange(D - 1) % m + 1
# Discretization of the frequency space, s
s = (2 * math.pi / N) * np.arange(N)
# Transformation to get points in the X space
X = np.empty([N * D, D])
omega2 = np.empty([D])
for i in range(D):
omega2[i] = omega[0]
idx = list(range(i)) + list(range(i + 1, D))
omega2[idx] = omega[1:]
l = range(i * N, (i + 1) * N)
# random phase shift on [0, 2pi) following Saltelli et al.
# Technometrics 1999
phi = 2 * math.pi * np.random.rand()
for j in range(D):
g = 0.5 + (1 / math.pi) * np.arcsin(np.sin(omega2[j] * s + phi))
X[l, j] = g
scale_samples(X, problem["bounds"])
return X
|
https://github.com/SALib/SALib/issues/99
|
SALib\sample\fast_sampler.py:39: RuntimeWarning: invalid value encountered in remainder
omega[1:] = np.arange(D - 1) % m + 1
\SALib\analyze\fast.py:70: RuntimeWarning: invalid value encountered in remainder
omega[1:] = np.arange(D - 1) % m + 1
\SALib\analyze\fast.py:88: RuntimeWarning: invalid value encountered in power
Sp = np.power(np.absolute(f[np.arange(1, int(N / 2))]) / N, 2)
Traceback (most recent call last):
File "test.py", line 17, in <module>
Si = fast.analyze(problem, Y, print_to_console=False)
File "SALib\analyze\fast.py", line 78, in analyze
Si['S1'][i] = compute_first_order(Y[l], N, M, omega[0])
File "SALib\analyze\fast.py", line 90, in compute_first_order
D1 = 2 * np.sum(Sp[np.arange(1, M + 1) * int(omega) - 1])
IndexError: index 15 is out of bounds for axis 0 with size 15
|
IndexError
|
def analyze(
problem: Dict,
X: np.array,
Y: np.array,
num_resamples: int = 100,
conf_level: float = 0.95,
print_to_console: bool = False,
seed: int = None,
) -> Dict:
"""Perform Delta Moment-Independent Analysis on model outputs.
Returns a dictionary with keys 'delta', 'delta_conf', 'S1', and 'S1_conf',
where each entry is a list of size D (the number of parameters) containing
the indices in the same order as the parameter file.
Parameters
----------
problem : dict
The problem definition
X: numpy.matrix
A NumPy matrix containing the model inputs
Y : numpy.array
A NumPy array containing the model outputs
num_resamples : int
The number of resamples when computing confidence intervals (default 10)
conf_level : float
The confidence interval level (default 0.95)
print_to_console : bool
Print results directly to console (default False)
References
----------
.. [1] Borgonovo, E. (2007). "A new uncertainty importance measure."
Reliability Engineering & System Safety, 92(6):771-784,
doi:10.1016/j.ress.2006.04.015.
.. [2] Plischke, E., E. Borgonovo, and C. L. Smith (2013). "Global
sensitivity measures from given data." European Journal of
Operational Research, 226(3):536-550, doi:10.1016/j.ejor.2012.11.047.
Examples
--------
>>> X = latin.sample(problem, 1000)
>>> Y = Ishigami.evaluate(X)
>>> Si = delta.analyze(problem, X, Y, print_to_console=True)
"""
if seed:
np.random.seed(seed)
D = problem["num_vars"]
N = Y.size
if not 0 < conf_level < 1:
raise RuntimeError("Confidence level must be between 0-1.")
# equal frequency partition
exp = 2 / (7 + np.tanh((1500 - N) / 500))
M = int(np.round(min(int(np.ceil(N**exp)), 48)))
m = np.linspace(0, N, M + 1)
Ygrid = np.linspace(np.min(Y), np.max(Y), 100)
keys = ("delta", "delta_conf", "S1", "S1_conf")
S = ResultDict((k, np.zeros(D)) for k in keys)
S["names"] = problem["names"]
if print_to_console:
print("Parameter %s %s %s %s" % keys)
try:
for i in range(D):
X_i = X[:, i]
S["delta"][i], S["delta_conf"][i] = bias_reduced_delta(
Y, Ygrid, X_i, m, num_resamples, conf_level
)
S["S1"][i] = sobol_first(Y, X_i, m)
S["S1_conf"][i] = sobol_first_conf(Y, X_i, m, num_resamples, conf_level)
if print_to_console:
print(
"%s %f %f %f %f"
% (
S["names"][i],
S["delta"][i],
S["delta_conf"][i],
S["S1"][i],
S["S1_conf"][i],
)
)
except np.linalg.LinAlgError as e:
msg = "Singular matrix detected\n"
msg += "This may be due to the sample size ({}) being too small\n".format(
Y.size
)
msg += "If this is not the case, check Y values or raise an issue with the\n"
msg += "SALib team"
raise np.linalg.LinAlgError(msg)
return S
|
def analyze(
problem, X, Y, num_resamples=10, conf_level=0.95, print_to_console=False, seed=None
):
"""Perform Delta Moment-Independent Analysis on model outputs.
Returns a dictionary with keys 'delta', 'delta_conf', 'S1', and 'S1_conf',
where each entry is a list of size D (the number of parameters) containing
the indices in the same order as the parameter file.
Parameters
----------
problem : dict
The problem definition
X: numpy.matrix
A NumPy matrix containing the model inputs
Y : numpy.array
A NumPy array containing the model outputs
num_resamples : int
The number of resamples when computing confidence intervals (default 10)
conf_level : float
The confidence interval level (default 0.95)
print_to_console : bool
Print results directly to console (default False)
References
----------
.. [1] Borgonovo, E. (2007). "A new uncertainty importance measure."
Reliability Engineering & System Safety, 92(6):771-784,
doi:10.1016/j.ress.2006.04.015.
.. [2] Plischke, E., E. Borgonovo, and C. L. Smith (2013). "Global
sensitivity measures from given data." European Journal of
Operational Research, 226(3):536-550, doi:10.1016/j.ejor.2012.11.047.
Examples
--------
>>> X = latin.sample(problem, 1000)
>>> Y = Ishigami.evaluate(X)
>>> Si = delta.analyze(problem, X, Y, print_to_console=True)
"""
if seed:
np.random.seed(seed)
D = problem["num_vars"]
N = Y.size
if not 0 < conf_level < 1:
raise RuntimeError("Confidence level must be between 0-1.")
# equal frequency partition
exp = 2 / (7 + np.tanh((1500 - N) / 500))
M = int(np.round(min(int(np.ceil(N**exp)), 48)))
m = np.linspace(0, N, M + 1)
Ygrid = np.linspace(np.min(Y), np.max(Y), 100)
keys = ("delta", "delta_conf", "S1", "S1_conf")
S = ResultDict((k, np.zeros(D)) for k in keys)
S["names"] = problem["names"]
if print_to_console:
print("Parameter %s %s %s %s" % keys)
try:
for i in range(D):
X_i = X[:, i]
S["delta"][i], S["delta_conf"][i] = bias_reduced_delta(
Y, Ygrid, X_i, m, num_resamples, conf_level
)
S["S1"][i] = sobol_first(Y, X_i, m)
S["S1_conf"][i] = sobol_first_conf(Y, X_i, m, num_resamples, conf_level)
if print_to_console:
print(
"%s %f %f %f %f"
% (
S["names"][i],
S["delta"][i],
S["delta_conf"][i],
S["S1"][i],
S["S1_conf"][i],
)
)
except np.linalg.LinAlgError as e:
msg = "Singular matrix detected\n"
msg += "This may be due to the sample size ({}) being too small\n".format(
Y.size
)
msg += "If this is not the case, check Y values or raise an issue with the\n"
msg += "SALib team"
raise np.linalg.LinAlgError(msg)
return S
|
https://github.com/SALib/SALib/issues/5
|
Traceback (most recent call last):
File "sobol.py", line 13, in <module>
param_values = saltelli.sample(1000, param_file, calc_second_order = True)
File "../../SALib/sample/saltelli.py", line 19, in sample
base_sequence = sobol_sequence.sample(N + skip_values, 2*D)
File "../../SALib/sample/sobol_sequence.py", line 47, in sample
directions = read_directions_file('joe-kuo-6.21000.txt')
File "../../SALib/sample/sobol_sequence.py", line 120, in read_directions_file
return np.array(directions)
ValueError: setting an array element with a sequence.
|
ValueError
|
def calc_delta(Y, Ygrid, X, m):
"""Plischke et al. (2013) delta index estimator (eqn 26) for d_hat."""
N = len(Y)
fy = gaussian_kde(Y, bw_method="silverman")(Ygrid)
abs_fy = np.abs(fy)
xr = rankdata(X, method="ordinal")
d_hat = 0
for j in range(len(m) - 1):
ix = np.where((xr > m[j]) & (xr <= m[j + 1]))[0]
nm = len(ix)
Y_ix = Y[ix]
if not np.all(np.equal(Y_ix, Y_ix[0])):
fyc = gaussian_kde(Y_ix, bw_method="silverman")(Ygrid)
fy_ = np.abs(fy - fyc)
else:
fy_ = abs_fy
d_hat += (nm / (2 * N)) * np.trapz(fy_, Ygrid)
return d_hat
|
def calc_delta(Y, Ygrid, X, m):
N = len(Y)
fy = gaussian_kde(Y, bw_method="silverman")(Ygrid)
abs_fy = np.abs(fy)
xr = rankdata(X, method="ordinal")
d_hat = 0
for j in range(len(m) - 1):
ix = np.where((xr > m[j]) & (xr <= m[j + 1]))[0]
nm = len(ix)
Y_ix = Y[ix]
if not np.all(np.equal(Y_ix, Y_ix[0])):
fyc = gaussian_kde(Y_ix, bw_method="silverman")(Ygrid)
fy_ = np.abs(fy - fyc)
else:
fy_ = abs_fy
d_hat += (nm / (2 * N)) * np.trapz(fy_, Ygrid)
return d_hat
|
https://github.com/SALib/SALib/issues/5
|
Traceback (most recent call last):
File "sobol.py", line 13, in <module>
param_values = saltelli.sample(1000, param_file, calc_second_order = True)
File "../../SALib/sample/saltelli.py", line 19, in sample
base_sequence = sobol_sequence.sample(N + skip_values, 2*D)
File "../../SALib/sample/sobol_sequence.py", line 47, in sample
directions = read_directions_file('joe-kuo-6.21000.txt')
File "../../SALib/sample/sobol_sequence.py", line 120, in read_directions_file
return np.array(directions)
ValueError: setting an array element with a sequence.
|
ValueError
|
def analyze(
problem,
X,
Y,
num_resamples=100,
conf_level=0.95,
print_to_console=False,
num_levels=4,
seed=None,
):
"""Perform Morris Analysis on model outputs.
Returns a dictionary with keys 'mu', 'mu_star', 'sigma', and
'mu_star_conf', where each entry is a list of parameters containing
the indices in the same order as the parameter file.
Arguments
---------
problem : dict
The problem definition
X : numpy.matrix
The NumPy matrix containing the model inputs of dtype=float
Y : numpy.array
The NumPy array containing the model outputs of dtype=float
num_resamples : int
The number of resamples used to compute the confidence
intervals (default 1000)
conf_level : float
The confidence interval level (default 0.95)
print_to_console : bool
Print results directly to console (default False)
num_levels : int
The number of grid levels, must be identical to the value
passed to SALib.sample.morris (default 4)
Returns
-------
Si : dict
A dictionary of sensitivity indices containing the following entries.
- `mu` - the mean elementary effect
- `mu_star` - the absolute of the mean elementary effect
- `sigma` - the standard deviation of the elementary effect
- `mu_star_conf` - the bootstrapped confidence interval
- `names` - the names of the parameters
References
----------
.. [1] Morris, M. (1991). "Factorial Sampling Plans for Preliminary
Computational Experiments." Technometrics, 33(2):161-174,
doi:10.1080/00401706.1991.10484804.
.. [2] Campolongo, F., J. Cariboni, and A. Saltelli (2007). "An effective
screening design for sensitivity analysis of large models."
Environmental Modelling & Software, 22(10):1509-1518,
doi:10.1016/j.envsoft.2006.10.004.
Examples
--------
>>> X = morris.sample(problem, 1000, num_levels=4)
>>> Y = Ishigami.evaluate(X)
>>> Si = morris.analyze(problem, X, Y, conf_level=0.95,
>>> print_to_console=True, num_levels=4)
"""
if seed:
np.random.seed(seed)
msg = "dtype of {} array must be 'float', float32 or float64"
if X.dtype not in ["float", "float32", "float64"]:
raise ValueError(msg.format("X"))
if Y.dtype not in ["float", "float32", "float64"]:
raise ValueError(msg.format("Y"))
# Assume that there are no groups
groups = None
delta = _compute_delta(num_levels)
num_vars = problem["num_vars"]
if (problem.get("groups") is None) & (Y.size % (num_vars + 1) == 0):
num_trajectories = int(Y.size / (num_vars + 1))
elif problem.get("groups") is not None:
groups, unique_group_names = compute_groups_matrix(problem["groups"])
number_of_groups = len(unique_group_names)
num_trajectories = int(Y.size / (number_of_groups + 1))
else:
raise ValueError(
"Number of samples in model output file must be"
"a multiple of (D+1), where D is the number of"
"parameters (or groups) in your parameter file."
)
ee = np.zeros((num_vars, num_trajectories))
ee = compute_elementary_effects(X, Y, int(Y.size / num_trajectories), delta)
# Output the Mu, Mu*, and Sigma Values. Also return them in case this is
# being called from Python
Si = ResultDict(
(k, [None] * num_vars)
for k in ["names", "mu", "mu_star", "sigma", "mu_star_conf"]
)
Si["mu"] = np.average(ee, 1)
Si["mu_star"] = np.average(np.abs(ee), 1)
Si["sigma"] = np.std(ee, axis=1, ddof=1)
Si["names"] = problem["names"]
for j in range(num_vars):
Si["mu_star_conf"][j] = compute_mu_star_confidence(
ee[j, :], num_trajectories, num_resamples, conf_level
)
if groups is None:
if print_to_console:
print(
"{0:<30} {1:>10} {2:>10} {3:>15} {4:>10}".format(
"Parameter", "Mu_Star", "Mu", "Mu_Star_Conf", "Sigma"
)
)
for j in list(range(num_vars)):
print(
"{0:30} {1:10.3f} {2:10.3f} {3:15.3f} {4:10.3f}".format(
Si["names"][j],
Si["mu_star"][j],
Si["mu"][j],
Si["mu_star_conf"][j],
Si["sigma"][j],
)
)
return Si
elif groups is not None:
# if there are groups, then the elementary effects returned need to be
# computed over the groups of variables,
# rather than the individual variables
Si_grouped = ResultDict(
(k, [None] * num_vars) for k in ["mu_star", "mu_star_conf"]
)
Si_grouped["mu_star"] = compute_grouped_metric(Si["mu_star"], groups)
Si_grouped["mu_star_conf"] = compute_grouped_metric(Si["mu_star_conf"], groups)
Si_grouped["names"] = unique_group_names
Si_grouped["sigma"] = compute_grouped_sigma(Si["sigma"], groups)
Si_grouped["mu"] = compute_grouped_sigma(Si["mu"], groups)
if print_to_console:
print(
"{0:<30} {1:>10} {2:>10} {3:>15} {4:>10}".format(
"Parameter", "Mu_Star", "Mu", "Mu_Star_Conf", "Sigma"
)
)
for j in list(range(number_of_groups)):
print(
"{0:30} {1:10.3f} {2:10.3f} {3:15.3f} {4:10.3f}".format(
Si_grouped["names"][j],
Si_grouped["mu_star"][j],
Si_grouped["mu"][j],
Si_grouped["mu_star_conf"][j],
Si_grouped["sigma"][j],
)
)
return Si_grouped
else:
raise RuntimeError("Could not determine which parameters should be returned")
|
def analyze(
problem,
X,
Y,
num_resamples=100,
conf_level=0.95,
print_to_console=False,
num_levels=4,
seed=None,
):
"""Perform Morris Analysis on model outputs.
Returns a dictionary with keys 'mu', 'mu_star', 'sigma', and
'mu_star_conf', where each entry is a list of parameters containing
the indices in the same order as the parameter file.
Arguments
---------
problem : dict
The problem definition
X : numpy.matrix
The NumPy matrix containing the model inputs of dtype=float
Y : numpy.array
The NumPy array containing the model outputs of dtype=float
num_resamples : int
The number of resamples used to compute the confidence
intervals (default 1000)
conf_level : float
The confidence interval level (default 0.95)
print_to_console : bool
Print results directly to console (default False)
num_levels : int
The number of grid levels, must be identical to the value
passed to SALib.sample.morris (default 4)
Returns
-------
Si : dict
A dictionary of sensitivity indices containing the following entries.
- `mu` - the mean elementary effect
- `mu_star` - the absolute of the mean elementary effect
- `sigma` - the standard deviation of the elementary effect
- `mu_star_conf` - the bootstrapped confidence interval
- `names` - the names of the parameters
References
----------
.. [1] Morris, M. (1991). "Factorial Sampling Plans for Preliminary
Computational Experiments." Technometrics, 33(2):161-174,
doi:10.1080/00401706.1991.10484804.
.. [2] Campolongo, F., J. Cariboni, and A. Saltelli (2007). "An effective
screening design for sensitivity analysis of large models."
Environmental Modelling & Software, 22(10):1509-1518,
doi:10.1016/j.envsoft.2006.10.004.
Examples
--------
>>> X = morris.sample(problem, 1000, num_levels=4)
>>> Y = Ishigami.evaluate(X)
>>> Si = morris.analyze(problem, X, Y, conf_level=0.95,
>>> print_to_console=True, num_levels=4)
"""
if seed:
np.random.seed(seed)
msg = "dtype of {} array must be 'float', float32 or float64"
if X.dtype not in ["float", "float32", "float64"]:
raise ValueError(msg.format("X"))
if Y.dtype not in ["float", "float32", "float64"]:
raise ValueError(msg.format("Y"))
# Assume that there are no groups
groups = None
delta = compute_delta(num_levels)
num_vars = problem["num_vars"]
if (problem.get("groups") is None) & (Y.size % (num_vars + 1) == 0):
num_trajectories = int(Y.size / (num_vars + 1))
elif problem.get("groups") is not None:
groups, unique_group_names = compute_groups_matrix(problem["groups"])
number_of_groups = len(unique_group_names)
num_trajectories = int(Y.size / (number_of_groups + 1))
else:
raise ValueError(
"Number of samples in model output file must be"
"a multiple of (D+1), where D is the number of"
"parameters (or groups) in your parameter file."
)
ee = np.zeros((num_vars, num_trajectories))
ee = compute_elementary_effects(X, Y, int(Y.size / num_trajectories), delta)
# Output the Mu, Mu*, and Sigma Values. Also return them in case this is
# being called from Python
Si = ResultDict(
(k, [None] * num_vars)
for k in ["names", "mu", "mu_star", "sigma", "mu_star_conf"]
)
Si["mu"] = np.average(ee, 1)
Si["mu_star"] = np.average(np.abs(ee), 1)
Si["sigma"] = np.std(ee, axis=1, ddof=1)
Si["names"] = problem["names"]
for j in range(num_vars):
Si["mu_star_conf"][j] = compute_mu_star_confidence(
ee[j, :], num_trajectories, num_resamples, conf_level
)
if groups is None:
if print_to_console:
print(
"{0:<30} {1:>10} {2:>10} {3:>15} {4:>10}".format(
"Parameter", "Mu_Star", "Mu", "Mu_Star_Conf", "Sigma"
)
)
for j in list(range(num_vars)):
print(
"{0:30} {1:10.3f} {2:10.3f} {3:15.3f} {4:10.3f}".format(
Si["names"][j],
Si["mu_star"][j],
Si["mu"][j],
Si["mu_star_conf"][j],
Si["sigma"][j],
)
)
return Si
elif groups is not None:
# if there are groups, then the elementary effects returned need to be
# computed over the groups of variables,
# rather than the individual variables
Si_grouped = dict((k, [None] * num_vars) for k in ["mu_star", "mu_star_conf"])
Si_grouped["mu_star"] = compute_grouped_metric(Si["mu_star"], groups)
Si_grouped["mu_star_conf"] = compute_grouped_metric(Si["mu_star_conf"], groups)
Si_grouped["names"] = unique_group_names
Si_grouped["sigma"] = compute_grouped_sigma(Si["sigma"], groups)
Si_grouped["mu"] = compute_grouped_sigma(Si["mu"], groups)
if print_to_console:
print(
"{0:<30} {1:>10} {2:>10} {3:>15} {4:>10}".format(
"Parameter", "Mu_Star", "Mu", "Mu_Star_Conf", "Sigma"
)
)
for j in list(range(number_of_groups)):
print(
"{0:30} {1:10.3f} {2:10.3f} {3:15.3f} {4:10.3f}".format(
Si_grouped["names"][j],
Si_grouped["mu_star"][j],
Si_grouped["mu"][j],
Si_grouped["mu_star_conf"][j],
Si_grouped["sigma"][j],
)
)
return Si_grouped
else:
raise RuntimeError("Could not determine which parameters should be returned")
|
https://github.com/SALib/SALib/issues/5
|
Traceback (most recent call last):
File "sobol.py", line 13, in <module>
param_values = saltelli.sample(1000, param_file, calc_second_order = True)
File "../../SALib/sample/saltelli.py", line 19, in sample
base_sequence = sobol_sequence.sample(N + skip_values, 2*D)
File "../../SALib/sample/sobol_sequence.py", line 47, in sample
directions = read_directions_file('joe-kuo-6.21000.txt')
File "../../SALib/sample/sobol_sequence.py", line 120, in read_directions_file
return np.array(directions)
ValueError: setting an array element with a sequence.
|
ValueError
|
def sample(problem, N, seed=None):
"""Generate model inputs using Latin hypercube sampling (LHS).
Returns a NumPy matrix containing the model inputs generated by Latin
hypercube sampling. The resulting matrix contains N rows and D columns,
where D is the number of parameters.
Parameters
----------
problem : dict
The problem definition
N : int
The number of samples to generate
"""
if seed:
np.random.seed(seed)
D = problem["num_vars"]
result = np.empty([N, D])
temp = np.empty([N])
d = 1.0 / N
for i in range(D):
for j in range(N):
temp[j] = np.random.uniform(low=j * d, high=(j + 1) * d)
np.random.shuffle(temp)
for j in range(N):
result[j, i] = temp[j]
if not problem.get("dists"):
scale_samples(result, problem["bounds"])
return result
else:
scaled_latin = nonuniform_scale_samples(
result, problem["bounds"], problem["dists"]
)
return scaled_latin
|
def sample(problem, N, seed=None):
"""Generate model inputs using Latin hypercube sampling (LHS).
Returns a NumPy matrix containing the model inputs generated by Latin
hypercube sampling. The resulting matrix contains N rows and D columns,
where D is the number of parameters.
Parameters
----------
problem : dict
The problem definition
N : int
The number of samples to generate
"""
if seed:
np.random.seed(seed)
D = problem["num_vars"]
result = np.empty([N, D])
temp = np.empty([N])
d = 1.0 / N
for i in range(D):
for j in range(N):
temp[j] = np.random.uniform(low=j * d, high=(j + 1) * d)
np.random.shuffle(temp)
for j in range(N):
result[j, i] = temp[j]
scale_samples(result, problem["bounds"])
return result
|
https://github.com/SALib/SALib/issues/5
|
Traceback (most recent call last):
File "sobol.py", line 13, in <module>
param_values = saltelli.sample(1000, param_file, calc_second_order = True)
File "../../SALib/sample/saltelli.py", line 19, in sample
base_sequence = sobol_sequence.sample(N + skip_values, 2*D)
File "../../SALib/sample/sobol_sequence.py", line 47, in sample
directions = read_directions_file('joe-kuo-6.21000.txt')
File "../../SALib/sample/sobol_sequence.py", line 120, in read_directions_file
return np.array(directions)
ValueError: setting an array element with a sequence.
|
ValueError
|
def _initAttributes(self):
self._avatar_url = github.GithubObject.NotSet
self._bio = github.GithubObject.NotSet
self._blog = github.GithubObject.NotSet
self._collaborators = github.GithubObject.NotSet
self._company = github.GithubObject.NotSet
self._contributions = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._disk_usage = github.GithubObject.NotSet
self._email = github.GithubObject.NotSet
self._events_url = github.GithubObject.NotSet
self._followers = github.GithubObject.NotSet
self._followers_url = github.GithubObject.NotSet
self._following = github.GithubObject.NotSet
self._following_url = github.GithubObject.NotSet
self._gists_url = github.GithubObject.NotSet
self._gravatar_id = github.GithubObject.NotSet
self._hireable = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._location = github.GithubObject.NotSet
self._login = github.GithubObject.NotSet
self._name = github.GithubObject.NotSet
self._organizations_url = github.GithubObject.NotSet
self._owned_private_repos = github.GithubObject.NotSet
self._permissions = github.GithubObject.NotSet
self._plan = github.GithubObject.NotSet
self._private_gists = github.GithubObject.NotSet
self._public_gists = github.GithubObject.NotSet
self._public_repos = github.GithubObject.NotSet
self._received_events_url = github.GithubObject.NotSet
self._repos_url = github.GithubObject.NotSet
self._site_admin = github.GithubObject.NotSet
self._starred_url = github.GithubObject.NotSet
self._subscriptions_url = github.GithubObject.NotSet
self._total_private_repos = github.GithubObject.NotSet
self._type = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
|
def _initAttributes(self):
self._avatar_url = github.GithubObject.NotSet
self._bio = github.GithubObject.NotSet
self._blog = github.GithubObject.NotSet
self._collaborators = github.GithubObject.NotSet
self._company = github.GithubObject.NotSet
self._contributions = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._disk_usage = github.GithubObject.NotSet
self._email = github.GithubObject.NotSet
self._events_url = github.GithubObject.NotSet
self._followers = github.GithubObject.NotSet
self._followers_url = github.GithubObject.NotSet
self._following = github.GithubObject.NotSet
self._following_url = github.GithubObject.NotSet
self._gists_url = github.GithubObject.NotSet
self._gravatar_id = github.GithubObject.NotSet
self._hireable = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._location = github.GithubObject.NotSet
self._login = github.GithubObject.NotSet
self._name = github.GithubObject.NotSet
self._organizations_url = github.GithubObject.NotSet
self._owned_private_repos = github.GithubObject.NotSet
self._permissions = github.GithubObject.NotSet
self._plan = github.GithubObject.NotSet
self._private_gists = github.GithubObject.NotSet
self._public_gists = github.GithubObject.NotSet
self._public_repos = github.GithubObject.NotSet
self._received_events_url = github.GithubObject.NotSet
self._repos_url = github.GithubObject.NotSet
self._starred_url = github.GithubObject.NotSet
self._subscriptions_url = github.GithubObject.NotSet
self._total_private_repos = github.GithubObject.NotSet
self._type = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
|
https://github.com/PyGithub/PyGithub/issues/713
|
Traceback (most recent call last):
File "protected_test.py", line 27, in <module>
print(collab.site_admin)
AttributeError: 'NamedUser' object has no attribute 'site_admin'
|
AttributeError
|
def _useAttributes(self, attributes):
if "avatar_url" in attributes: # pragma no branch
self._avatar_url = self._makeStringAttribute(attributes["avatar_url"])
if "bio" in attributes: # pragma no branch
self._bio = self._makeStringAttribute(attributes["bio"])
if "blog" in attributes: # pragma no branch
self._blog = self._makeStringAttribute(attributes["blog"])
if "collaborators" in attributes: # pragma no branch
self._collaborators = self._makeIntAttribute(attributes["collaborators"])
if "company" in attributes: # pragma no branch
self._company = self._makeStringAttribute(attributes["company"])
if "contributions" in attributes: # pragma no branch
self._contributions = self._makeIntAttribute(attributes["contributions"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "disk_usage" in attributes: # pragma no branch
self._disk_usage = self._makeIntAttribute(attributes["disk_usage"])
if "email" in attributes: # pragma no branch
self._email = self._makeStringAttribute(attributes["email"])
if "events_url" in attributes: # pragma no branch
self._events_url = self._makeStringAttribute(attributes["events_url"])
if "followers" in attributes: # pragma no branch
self._followers = self._makeIntAttribute(attributes["followers"])
if "followers_url" in attributes: # pragma no branch
self._followers_url = self._makeStringAttribute(attributes["followers_url"])
if "following" in attributes: # pragma no branch
self._following = self._makeIntAttribute(attributes["following"])
if "following_url" in attributes: # pragma no branch
self._following_url = self._makeStringAttribute(attributes["following_url"])
if "gists_url" in attributes: # pragma no branch
self._gists_url = self._makeStringAttribute(attributes["gists_url"])
if "gravatar_id" in attributes: # pragma no branch
self._gravatar_id = self._makeStringAttribute(attributes["gravatar_id"])
if "hireable" in attributes: # pragma no branch
self._hireable = self._makeBoolAttribute(attributes["hireable"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "location" in attributes: # pragma no branch
self._location = self._makeStringAttribute(attributes["location"])
if "login" in attributes: # pragma no branch
self._login = self._makeStringAttribute(attributes["login"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "organizations_url" in attributes: # pragma no branch
self._organizations_url = self._makeStringAttribute(
attributes["organizations_url"]
)
if "owned_private_repos" in attributes: # pragma no branch
self._owned_private_repos = self._makeIntAttribute(
attributes["owned_private_repos"]
)
if "permissions" in attributes: # pragma no branch
self._permissions = self._makeClassAttribute(
github.Permissions.Permissions, attributes["permissions"]
)
if "plan" in attributes: # pragma no branch
self._plan = self._makeClassAttribute(github.Plan.Plan, attributes["plan"])
if "private_gists" in attributes: # pragma no branch
self._private_gists = self._makeIntAttribute(attributes["private_gists"])
if "public_gists" in attributes: # pragma no branch
self._public_gists = self._makeIntAttribute(attributes["public_gists"])
if "public_repos" in attributes: # pragma no branch
self._public_repos = self._makeIntAttribute(attributes["public_repos"])
if "received_events_url" in attributes: # pragma no branch
self._received_events_url = self._makeStringAttribute(
attributes["received_events_url"]
)
if "repos_url" in attributes: # pragma no branch
self._repos_url = self._makeStringAttribute(attributes["repos_url"])
if "site_admin" in attributes: # pragma no branch
self._site_admin = self._makeBoolAttribute(attributes["site_admin"])
if "starred_url" in attributes: # pragma no branch
self._starred_url = self._makeStringAttribute(attributes["starred_url"])
if "subscriptions_url" in attributes: # pragma no branch
self._subscriptions_url = self._makeStringAttribute(
attributes["subscriptions_url"]
)
if "total_private_repos" in attributes: # pragma no branch
self._total_private_repos = self._makeIntAttribute(
attributes["total_private_repos"]
)
if "type" in attributes: # pragma no branch
self._type = self._makeStringAttribute(attributes["type"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
|
def _useAttributes(self, attributes):
if "avatar_url" in attributes: # pragma no branch
self._avatar_url = self._makeStringAttribute(attributes["avatar_url"])
if "bio" in attributes: # pragma no branch
self._bio = self._makeStringAttribute(attributes["bio"])
if "blog" in attributes: # pragma no branch
self._blog = self._makeStringAttribute(attributes["blog"])
if "collaborators" in attributes: # pragma no branch
self._collaborators = self._makeIntAttribute(attributes["collaborators"])
if "company" in attributes: # pragma no branch
self._company = self._makeStringAttribute(attributes["company"])
if "contributions" in attributes: # pragma no branch
self._contributions = self._makeIntAttribute(attributes["contributions"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "disk_usage" in attributes: # pragma no branch
self._disk_usage = self._makeIntAttribute(attributes["disk_usage"])
if "email" in attributes: # pragma no branch
self._email = self._makeStringAttribute(attributes["email"])
if "events_url" in attributes: # pragma no branch
self._events_url = self._makeStringAttribute(attributes["events_url"])
if "followers" in attributes: # pragma no branch
self._followers = self._makeIntAttribute(attributes["followers"])
if "followers_url" in attributes: # pragma no branch
self._followers_url = self._makeStringAttribute(attributes["followers_url"])
if "following" in attributes: # pragma no branch
self._following = self._makeIntAttribute(attributes["following"])
if "following_url" in attributes: # pragma no branch
self._following_url = self._makeStringAttribute(attributes["following_url"])
if "gists_url" in attributes: # pragma no branch
self._gists_url = self._makeStringAttribute(attributes["gists_url"])
if "gravatar_id" in attributes: # pragma no branch
self._gravatar_id = self._makeStringAttribute(attributes["gravatar_id"])
if "hireable" in attributes: # pragma no branch
self._hireable = self._makeBoolAttribute(attributes["hireable"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "location" in attributes: # pragma no branch
self._location = self._makeStringAttribute(attributes["location"])
if "login" in attributes: # pragma no branch
self._login = self._makeStringAttribute(attributes["login"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "organizations_url" in attributes: # pragma no branch
self._organizations_url = self._makeStringAttribute(
attributes["organizations_url"]
)
if "owned_private_repos" in attributes: # pragma no branch
self._owned_private_repos = self._makeIntAttribute(
attributes["owned_private_repos"]
)
if "permissions" in attributes: # pragma no branch
self._permissions = self._makeClassAttribute(
github.Permissions.Permissions, attributes["permissions"]
)
if "plan" in attributes: # pragma no branch
self._plan = self._makeClassAttribute(github.Plan.Plan, attributes["plan"])
if "private_gists" in attributes: # pragma no branch
self._private_gists = self._makeIntAttribute(attributes["private_gists"])
if "public_gists" in attributes: # pragma no branch
self._public_gists = self._makeIntAttribute(attributes["public_gists"])
if "public_repos" in attributes: # pragma no branch
self._public_repos = self._makeIntAttribute(attributes["public_repos"])
if "received_events_url" in attributes: # pragma no branch
self._received_events_url = self._makeStringAttribute(
attributes["received_events_url"]
)
if "repos_url" in attributes: # pragma no branch
self._repos_url = self._makeStringAttribute(attributes["repos_url"])
if "starred_url" in attributes: # pragma no branch
self._starred_url = self._makeStringAttribute(attributes["starred_url"])
if "subscriptions_url" in attributes: # pragma no branch
self._subscriptions_url = self._makeStringAttribute(
attributes["subscriptions_url"]
)
if "total_private_repos" in attributes: # pragma no branch
self._total_private_repos = self._makeIntAttribute(
attributes["total_private_repos"]
)
if "type" in attributes: # pragma no branch
self._type = self._makeStringAttribute(attributes["type"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
|
https://github.com/PyGithub/PyGithub/issues/713
|
Traceback (most recent call last):
File "protected_test.py", line 27, in <module>
print(collab.site_admin)
AttributeError: 'NamedUser' object has no attribute 'site_admin'
|
AttributeError
|
def __structuredFromJson(self, data):
if len(data) == 0:
return None
else:
if atLeastPython3 and isinstance(data, bytes):
data = data.decode("utf-8")
return json.loads(data)
|
def __structuredFromJson(self, data):
if len(data) == 0:
return None
else:
return json.loads(data)
|
https://github.com/PyGithub/PyGithub/issues/142
|
import github
gh_instance = github.Github('<my-token>')
user = gh_instance.get_user()
user.name
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-4-ec5376e00d61> in <module>()
----> 1 user.name
/usr/local/lib/python3.2/dist-packages/github/AuthenticatedUser.py in name(self)
166 :type: string
167 """
--> 168 self._completeIfNotSet(self._name)
169 return self._NoneIfNotSet(self._name)
170
/usr/local/lib/python3.2/dist-packages/github/GithubObject.py in _completeIfNotSet(self, value)
61 def _completeIfNotSet(self, value):
62 if not self.__completed and value is NotSet:
---> 63 self.__complete()
64
65 def __complete(self):
/usr/local/lib/python3.2/dist-packages/github/GithubObject.py in __complete(self)
68 self._url,
69 None,
---> 70 None
71 )
72 self._useAttributes(data)
/usr/local/lib/python3.2/dist-packages/github/Requester.py in requestJsonAndCheck(self, verb, url, parameters, input)
77
78 def requestJsonAndCheck(self, verb, url, parameters, input):
---> 79 return self.__check(*self.requestJson(verb, url, parameters, input))
80
81 def requestMultipartAndCheck(self, verb, url, parameters, input):
/usr/local/lib/python3.2/dist-packages/github/Requester.py in __check(self, status, responseHeaders, output)
83
84 def __check(self, status, responseHeaders, output):
---> 85 output = self.__structuredFromJson(output)
86 if status >= 400:
87 raise GithubException.GithubException(status, output)
/usr/local/lib/python3.2/dist-packages/github/Requester.py in __structuredFromJson(self, data)
92 return None
93 else:
---> 94 return json.loads(data)
95
96 def requestJson(self, verb, url, parameters, input):
/usr/lib/python3.2/json/__init__.py in loads(s, encoding, cls, object_hook, parse_float, parse_int, parse_constant, object_pairs_hook, **kw)
307 parse_int is None and parse_float is None and
308 parse_constant is None and object_pairs_hook is None and not kw):
--> 309 return _default_decoder.decode(s)
310 if cls is None:
311 cls = JSONDecoder
/usr/lib/python3.2/json/decoder.py in decode(self, s, _w)
351
352 """
--> 353 obj, end = self.raw_decode(s, idx=_w(s, 0).end())
354 end = _w(s, end).end()
355 if end != len(s):
TypeError: can't use a string pattern on a bytes-like object
|
TypeError
|
def __init__(self, login_or_token, password, base_url, timeout):
if password is not None:
login = login_or_token
self.__authorizationHeader = "Basic " + base64.b64encode(
login + ":" + password
).replace("\n", "")
elif login_or_token is not None:
token = login_or_token
self.__authorizationHeader = "token " + token
else:
self.__authorizationHeader = None
self.__base_url = base_url
o = urlparse.urlparse(base_url)
self.__hostname = o.hostname
self.__port = o.port
self.__prefix = o.path
self.__timeout = timeout
self.__scheme = o.scheme
if o.scheme == "https":
self.__connectionClass = self.__httpsConnectionClass
elif o.scheme == "http":
self.__connectionClass = self.__httpConnectionClass
else:
assert False # pragma no cover
self.rate_limiting = (5000, 5000)
|
def __init__(self, login_or_token, password, base_url, timeout):
if password is not None:
login = login_or_token
self.__authorizationHeader = "Basic " + base64.b64encode(
login + ":" + password
).replace("\n", "")
elif login_or_token is not None:
token = login_or_token
self.__authorizationHeader = "token " + token
else:
self.__authorizationHeader = None
self.__base_url = base_url
o = urlparse.urlparse(base_url)
self.__hostname = o.hostname
self.__port = o.port
self.__prefix = o.path
self.__timeout = timeout
if o.scheme == "https":
self.__connectionClass = self.__httpsConnectionClass
elif o.scheme == "http":
self.__connectionClass = self.__httpConnectionClass
else:
assert False # pragma no cover
self.rate_limiting = (5000, 5000)
|
https://github.com/PyGithub/PyGithub/issues/80
|
from github import Github
gh = Github( "login", "password", "base_url")
for repo in gh.get_user().get_repos():
... print repo.name
...
repo1
repo2
my-person-linux-kernel-repo
the-secret-macosx-repo
for repo in gh.get_organization( org_name ).get_repos():
... print repo.name
...
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "build/bdist.macosx-10.7-intel/egg/github/Organization.py", line 311, in get_repos
File "build/bdist.macosx-10.7-intel/egg/github/Requester.py", line 60, in requestAndCheck
File "build/bdist.macosx-10.7-intel/egg/github/Requester.py", line 76, in requestRaw
AssertionError
|
AssertionError
|
def requestRaw(self, verb, url, parameters, input):
assert verb in ["HEAD", "GET", "POST", "PATCH", "PUT", "DELETE"]
# URLs generated locally will be relative to __base_url
# URLs returned from the server will start with __base_url
if url.startswith("/"):
url = self.__prefix + url
else:
o = urlparse.urlparse(url)
assert (
o.scheme == self.__scheme or o.scheme == "https" and self.__scheme == "http"
) # Issue #80
assert o.hostname == self.__hostname
assert o.path.startswith(self.__prefix)
assert o.port == self.__port
url = o.path
if o.query != "":
url += "?" + o.query
headers = dict()
if self.__authorizationHeader is not None:
headers["Authorization"] = self.__authorizationHeader
if atLeastPython26:
cnx = self.__connectionClass(
host=self.__hostname, port=self.__port, strict=True, timeout=self.__timeout
)
else: # pragma no cover
cnx = self.__connectionClass(
host=self.__hostname, port=self.__port, strict=True
) # pragma no cover
cnx.request(verb, self.__completeUrl(url, parameters), json.dumps(input), headers)
response = cnx.getresponse()
status = response.status
headers = dict(response.getheaders())
output = response.read()
cnx.close()
if "x-ratelimit-remaining" in headers and "x-ratelimit-limit" in headers:
self.rate_limiting = (
int(headers["x-ratelimit-remaining"]),
int(headers["x-ratelimit-limit"]),
)
# print verb, self.__base_url + url, parameters, input, "==>", status, str( headers ), str( output )
return status, headers, output
|
def requestRaw(self, verb, url, parameters, input):
assert verb in ["HEAD", "GET", "POST", "PATCH", "PUT", "DELETE"]
# URLs generated locally will be relative to __base_url
# URLs returned from the server will start with __base_url
if url.startswith(self.__base_url):
url = url[len(self.__base_url) :]
else:
assert url.startswith("/")
url = self.__prefix + url
headers = dict()
if self.__authorizationHeader is not None:
headers["Authorization"] = self.__authorizationHeader
if atLeastPython26:
cnx = self.__connectionClass(
host=self.__hostname, port=self.__port, strict=True, timeout=self.__timeout
)
else: # pragma no cover
cnx = self.__connectionClass(
host=self.__hostname, port=self.__port, strict=True
) # pragma no cover
cnx.request(verb, self.__completeUrl(url, parameters), json.dumps(input), headers)
response = cnx.getresponse()
status = response.status
headers = dict(response.getheaders())
output = response.read()
cnx.close()
if "x-ratelimit-remaining" in headers and "x-ratelimit-limit" in headers:
self.rate_limiting = (
int(headers["x-ratelimit-remaining"]),
int(headers["x-ratelimit-limit"]),
)
# print verb, self.__base_url + url, parameters, input, "==>", status, str( headers ), str( output )
return status, headers, output
|
https://github.com/PyGithub/PyGithub/issues/80
|
from github import Github
gh = Github( "login", "password", "base_url")
for repo in gh.get_user().get_repos():
... print repo.name
...
repo1
repo2
my-person-linux-kernel-repo
the-secret-macosx-repo
for repo in gh.get_organization( org_name ).get_repos():
... print repo.name
...
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "build/bdist.macosx-10.7-intel/egg/github/Organization.py", line 311, in get_repos
File "build/bdist.macosx-10.7-intel/egg/github/Requester.py", line 60, in requestAndCheck
File "build/bdist.macosx-10.7-intel/egg/github/Requester.py", line 76, in requestRaw
AssertionError
|
AssertionError
|
def forward_request(self, method, path, data, headers):
result = handle_special_request(method, path, data, headers)
if result is not None:
return result
if not data:
data = "{}"
data = json.loads(to_str(data))
ddb_client = aws_stack.connect_to_service("dynamodb")
action = headers.get("X-Amz-Target")
if self.should_throttle(action):
return error_response_throughput()
ProxyListenerDynamoDB.thread_local.existing_item = None
if action == "%s.CreateTable" % ACTION_PREFIX:
# Check if table exists, to avoid error log output from DynamoDBLocal
if self.table_exists(ddb_client, data["TableName"]):
return error_response(
message="Table already created",
error_type="ResourceInUseException",
code=400,
)
if action == "%s.CreateGlobalTable" % ACTION_PREFIX:
return create_global_table(data)
elif action == "%s.DescribeGlobalTable" % ACTION_PREFIX:
return describe_global_table(data)
elif action == "%s.ListGlobalTables" % ACTION_PREFIX:
return list_global_tables(data)
elif action == "%s.UpdateGlobalTable" % ACTION_PREFIX:
return update_global_table(data)
elif action in (
"%s.PutItem" % ACTION_PREFIX,
"%s.UpdateItem" % ACTION_PREFIX,
"%s.DeleteItem" % ACTION_PREFIX,
):
# find an existing item and store it in a thread-local, so we can access it in return_response,
# in order to determine whether an item already existed (MODIFY) or not (INSERT)
try:
if has_event_sources_or_streams_enabled(data["TableName"]):
ProxyListenerDynamoDB.thread_local.existing_item = find_existing_item(
data
)
except Exception as e:
if "ResourceNotFoundException" in str(e):
return get_table_not_found_error()
raise
# Fix incorrect values if ReturnValues==ALL_OLD and ReturnConsumedCapacity is
# empty, see https://github.com/localstack/localstack/issues/2049
if (
(data.get("ReturnValues") == "ALL_OLD") or (not data.get("ReturnValues"))
) and not data.get("ReturnConsumedCapacity"):
data["ReturnConsumedCapacity"] = "TOTAL"
return Request(data=json.dumps(data), method=method, headers=headers)
elif action == "%s.DescribeTable" % ACTION_PREFIX:
# Check if table exists, to avoid error log output from DynamoDBLocal
if not self.table_exists(ddb_client, data["TableName"]):
return get_table_not_found_error()
elif action == "%s.DeleteTable" % ACTION_PREFIX:
# Check if table exists, to avoid error log output from DynamoDBLocal
if not self.table_exists(ddb_client, data["TableName"]):
return get_table_not_found_error()
elif action == "%s.BatchWriteItem" % ACTION_PREFIX:
existing_items = []
for table_name in sorted(data["RequestItems"].keys()):
for request in data["RequestItems"][table_name]:
for key in ["PutRequest", "DeleteRequest"]:
inner_request = request.get(key)
if inner_request:
existing_items.append(
find_existing_item(inner_request, table_name)
)
ProxyListenerDynamoDB.thread_local.existing_items = existing_items
elif action == "%s.Query" % ACTION_PREFIX:
if data.get("IndexName"):
if not is_index_query_valid(to_str(data["TableName"]), data.get("Select")):
return error_response(
message="One or more parameter values were invalid: Select type "
"ALL_ATTRIBUTES is not supported for global secondary index id-index "
"because its projection type is not ALL",
error_type="ValidationException",
code=400,
)
elif action == "%s.TransactWriteItems" % ACTION_PREFIX:
existing_items = []
for item in data["TransactItems"]:
for key in ["Put", "Update", "Delete"]:
inner_item = item.get(key)
if inner_item:
existing_items.append(find_existing_item(inner_item))
ProxyListenerDynamoDB.thread_local.existing_items = existing_items
elif action == "%s.UpdateTimeToLive" % ACTION_PREFIX:
# TODO: TTL status is maintained/mocked but no real expiry is happening for items
response = Response()
response.status_code = 200
self._table_ttl_map[data["TableName"]] = {
"AttributeName": data["TimeToLiveSpecification"]["AttributeName"],
"Status": data["TimeToLiveSpecification"]["Enabled"],
}
response._content = json.dumps(
{"TimeToLiveSpecification": data["TimeToLiveSpecification"]}
)
fix_headers_for_updated_response(response)
return response
elif action == "%s.DescribeTimeToLive" % ACTION_PREFIX:
response = Response()
response.status_code = 200
if data["TableName"] in self._table_ttl_map:
if self._table_ttl_map[data["TableName"]]["Status"]:
ttl_status = "ENABLED"
else:
ttl_status = "DISABLED"
response._content = json.dumps(
{
"TimeToLiveDescription": {
"AttributeName": self._table_ttl_map[data["TableName"]][
"AttributeName"
],
"TimeToLiveStatus": ttl_status,
}
}
)
else: # TTL for dynamodb table not set
response._content = json.dumps(
{"TimeToLiveDescription": {"TimeToLiveStatus": "DISABLED"}}
)
fix_headers_for_updated_response(response)
return response
elif (
action == "%s.TagResource" % ACTION_PREFIX
or action == "%s.UntagResource" % ACTION_PREFIX
):
response = Response()
response.status_code = 200
response._content = "" # returns an empty body on success.
fix_headers_for_updated_response(response)
return response
elif action == "%s.ListTagsOfResource" % ACTION_PREFIX:
response = Response()
response.status_code = 200
response._content = json.dumps(
{
"Tags": [
{"Key": k, "Value": v}
for k, v in TABLE_TAGS.get(data["ResourceArn"], {}).items()
]
}
)
fix_headers_for_updated_response(response)
return response
return True
|
def forward_request(self, method, path, data, headers):
result = handle_special_request(method, path, data, headers)
if result is not None:
return result
if not data:
data = "{}"
data = json.loads(to_str(data))
ddb_client = aws_stack.connect_to_service("dynamodb")
action = headers.get("X-Amz-Target")
if self.should_throttle(action):
return self.error_response_throughput()
ProxyListenerDynamoDB.thread_local.existing_item = None
if action == "%s.CreateTable" % ACTION_PREFIX:
# Check if table exists, to avoid error log output from DynamoDBLocal
if self.table_exists(ddb_client, data["TableName"]):
return error_response(
message="Table already created",
error_type="ResourceInUseException",
code=400,
)
if action == "%s.CreateGlobalTable" % ACTION_PREFIX:
return create_global_table(data)
elif action == "%s.DescribeGlobalTable" % ACTION_PREFIX:
return describe_global_table(data)
elif action == "%s.ListGlobalTables" % ACTION_PREFIX:
return list_global_tables(data)
elif action == "%s.UpdateGlobalTable" % ACTION_PREFIX:
return update_global_table(data)
elif action in (
"%s.PutItem" % ACTION_PREFIX,
"%s.UpdateItem" % ACTION_PREFIX,
"%s.DeleteItem" % ACTION_PREFIX,
):
# find an existing item and store it in a thread-local, so we can access it in return_response,
# in order to determine whether an item already existed (MODIFY) or not (INSERT)
try:
if has_event_sources_or_streams_enabled(data["TableName"]):
ProxyListenerDynamoDB.thread_local.existing_item = find_existing_item(
data
)
except Exception as e:
if "ResourceNotFoundException" in str(e):
return get_table_not_found_error()
raise
# Fix incorrect values if ReturnValues==ALL_OLD and ReturnConsumedCapacity is
# empty, see https://github.com/localstack/localstack/issues/2049
if (
(data.get("ReturnValues") == "ALL_OLD") or (not data.get("ReturnValues"))
) and not data.get("ReturnConsumedCapacity"):
data["ReturnConsumedCapacity"] = "TOTAL"
return Request(data=json.dumps(data), method=method, headers=headers)
elif action == "%s.DescribeTable" % ACTION_PREFIX:
# Check if table exists, to avoid error log output from DynamoDBLocal
if not self.table_exists(ddb_client, data["TableName"]):
return get_table_not_found_error()
elif action == "%s.DeleteTable" % ACTION_PREFIX:
# Check if table exists, to avoid error log output from DynamoDBLocal
if not self.table_exists(ddb_client, data["TableName"]):
return get_table_not_found_error()
elif action == "%s.BatchWriteItem" % ACTION_PREFIX:
existing_items = []
for table_name in sorted(data["RequestItems"].keys()):
for request in data["RequestItems"][table_name]:
for key in ["PutRequest", "DeleteRequest"]:
inner_request = request.get(key)
if inner_request:
existing_items.append(
find_existing_item(inner_request, table_name)
)
ProxyListenerDynamoDB.thread_local.existing_items = existing_items
elif action == "%s.Query" % ACTION_PREFIX:
if data.get("IndexName"):
if not is_index_query_valid(to_str(data["TableName"]), data.get("Select")):
return error_response(
message="One or more parameter values were invalid: Select type "
"ALL_ATTRIBUTES is not supported for global secondary index id-index "
"because its projection type is not ALL",
error_type="ValidationException",
code=400,
)
elif action == "%s.TransactWriteItems" % ACTION_PREFIX:
existing_items = []
for item in data["TransactItems"]:
for key in ["Put", "Update", "Delete"]:
inner_item = item.get(key)
if inner_item:
existing_items.append(find_existing_item(inner_item))
ProxyListenerDynamoDB.thread_local.existing_items = existing_items
elif action == "%s.UpdateTimeToLive" % ACTION_PREFIX:
# TODO: TTL status is maintained/mocked but no real expiry is happening for items
response = Response()
response.status_code = 200
self._table_ttl_map[data["TableName"]] = {
"AttributeName": data["TimeToLiveSpecification"]["AttributeName"],
"Status": data["TimeToLiveSpecification"]["Enabled"],
}
response._content = json.dumps(
{"TimeToLiveSpecification": data["TimeToLiveSpecification"]}
)
fix_headers_for_updated_response(response)
return response
elif action == "%s.DescribeTimeToLive" % ACTION_PREFIX:
response = Response()
response.status_code = 200
if data["TableName"] in self._table_ttl_map:
if self._table_ttl_map[data["TableName"]]["Status"]:
ttl_status = "ENABLED"
else:
ttl_status = "DISABLED"
response._content = json.dumps(
{
"TimeToLiveDescription": {
"AttributeName": self._table_ttl_map[data["TableName"]][
"AttributeName"
],
"TimeToLiveStatus": ttl_status,
}
}
)
else: # TTL for dynamodb table not set
response._content = json.dumps(
{"TimeToLiveDescription": {"TimeToLiveStatus": "DISABLED"}}
)
fix_headers_for_updated_response(response)
return response
elif (
action == "%s.TagResource" % ACTION_PREFIX
or action == "%s.UntagResource" % ACTION_PREFIX
):
response = Response()
response.status_code = 200
response._content = "" # returns an empty body on success.
fix_headers_for_updated_response(response)
return response
elif action == "%s.ListTagsOfResource" % ACTION_PREFIX:
response = Response()
response.status_code = 200
response._content = json.dumps(
{
"Tags": [
{"Key": k, "Value": v}
for k, v in TABLE_TAGS.get(data["ResourceArn"], {}).items()
]
}
)
fix_headers_for_updated_response(response)
return response
return True
|
https://github.com/localstack/localstack/issues/3502
|
localstack_main | 2021-01-22T22:14:37:WARNING:localstack.utils.server.http2_server: Error in proxy handler for request POST http://localhost:4566/: 'ProxyListenerDynamoDB' object has no attribute 'error_response_throughput' Traceback (most recent call last):
localstack_main | File "/opt/code/localstack/localstack/utils/server/http2_server.py", line 107, in index
localstack_main | raise result
localstack_main | File "/opt/code/localstack/localstack/utils/bootstrap.py", line 581, in run
localstack_main | result = self.func(self.params)
localstack_main | File "/opt/code/localstack/localstack/utils/async_utils.py", line 28, in _run
localstack_main | return fn(*args, **kwargs)
localstack_main | File "/opt/code/localstack/localstack/services/generic_proxy.py", line 605, in handler
localstack_main | response = modify_and_forward(method=method, path=path_with_params, data_bytes=data, headers=headers,
localstack_main | File "/opt/code/localstack/localstack/services/generic_proxy.py", line 373, in modify_and_forward
localstack_main | listener_result = listener.forward_request(method=method,
localstack_main | File "/opt/code/localstack/localstack/services/edge.py", line 105, in forward_request
localstack_main | return do_forward_request(api, method, path, data, headers, port=port)
localstack_main | File "/opt/code/localstack/localstack/services/edge.py", line 116, in do_forward_request
localstack_main | result = do_forward_request_inmem(api, method, path, data, headers, port=port)
localstack_main | File "/opt/code/localstack/localstack/services/edge.py", line 136, in do_forward_request_inmem
localstack_main | response = modify_and_forward(method=method, path=path, data_bytes=data, headers=headers,
localstack_main | File "/opt/code/localstack/localstack/services/generic_proxy.py", line 373, in modify_and_forward
localstack_main | listener_result = listener.forward_request(method=method,
localstack_main | File "/opt/code/localstack/localstack/services/dynamodb/dynamodb_listener.py", line 97, in forward_request
localstack_main | return self.error_response_throughput()
localstack_main | AttributeError: 'ProxyListenerDynamoDB' object has no attribute 'error_response_throughput'
|
AttributeError
|
def get_scheduled_rule_func(data):
def func(*args):
rule_name = data.get("Name")
client = aws_stack.connect_to_service("events")
targets = client.list_targets_by_rule(Rule=rule_name)["Targets"]
if targets:
LOG.debug(
"Notifying %s targets in response to triggered Events rule %s"
% (len(targets), rule_name)
)
for target in targets:
arn = target.get("Arn")
event = json.loads(target.get("Input") or "{}")
attr = aws_stack.get_events_target_attributes(target)
aws_stack.send_event_to_target(arn, event, target_attributes=attr)
return func
|
def get_scheduled_rule_func(data):
def func(*args):
rule_name = data.get("Name")
client = aws_stack.connect_to_service("events")
targets = client.list_targets_by_rule(Rule=rule_name)["Targets"]
if targets:
LOG.debug(
"Notifying %s targets in response to triggered Events rule %s"
% (len(targets), rule_name)
)
for target in targets:
arn = target.get("Arn")
event = json.loads(target.get("Input") or "{}")
aws_stack.send_event_to_target(arn, event)
return func
|
https://github.com/localstack/localstack/issues/3402
|
2020-12-29T04:01:46:WARNING:bootstrap.py: Thread run method <function get_scheduled_rule_func.<locals>.func at 0x7f0ceb256e50>(None) failed: Traceback (most recent call last):
File "/opt/code/localstack/localstack/utils/bootstrap.py", line 514, in run
self.func(self.params)
File "/opt/code/localstack/localstack/services/events/events_listener.py", line 75, in func
sqs_client.send_message(QueueUrl=queue_url, MessageBody=json.dumps(event))
File "/opt/code/localstack/.venv/lib/python3.8/site-packages/botocore/client.py", line 316, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/opt/code/localstack/.venv/lib/python3.8/site-packages/botocore/client.py", line 635, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (MissingParameter) when calling the SendMessage operation: The request must contain the parameter MessageGroupId.
|
botocore.exceptions.ClientError
|
def func(*args):
rule_name = data.get("Name")
client = aws_stack.connect_to_service("events")
targets = client.list_targets_by_rule(Rule=rule_name)["Targets"]
if targets:
LOG.debug(
"Notifying %s targets in response to triggered Events rule %s"
% (len(targets), rule_name)
)
for target in targets:
arn = target.get("Arn")
event = json.loads(target.get("Input") or "{}")
attr = aws_stack.get_events_target_attributes(target)
aws_stack.send_event_to_target(arn, event, target_attributes=attr)
|
def func(*args):
rule_name = data.get("Name")
client = aws_stack.connect_to_service("events")
targets = client.list_targets_by_rule(Rule=rule_name)["Targets"]
if targets:
LOG.debug(
"Notifying %s targets in response to triggered Events rule %s"
% (len(targets), rule_name)
)
for target in targets:
arn = target.get("Arn")
event = json.loads(target.get("Input") or "{}")
aws_stack.send_event_to_target(arn, event)
|
https://github.com/localstack/localstack/issues/3402
|
2020-12-29T04:01:46:WARNING:bootstrap.py: Thread run method <function get_scheduled_rule_func.<locals>.func at 0x7f0ceb256e50>(None) failed: Traceback (most recent call last):
File "/opt/code/localstack/localstack/utils/bootstrap.py", line 514, in run
self.func(self.params)
File "/opt/code/localstack/localstack/services/events/events_listener.py", line 75, in func
sqs_client.send_message(QueueUrl=queue_url, MessageBody=json.dumps(event))
File "/opt/code/localstack/.venv/lib/python3.8/site-packages/botocore/client.py", line 316, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/opt/code/localstack/.venv/lib/python3.8/site-packages/botocore/client.py", line 635, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (MissingParameter) when calling the SendMessage operation: The request must contain the parameter MessageGroupId.
|
botocore.exceptions.ClientError
|
def send_event_to_target(arn, event, target_attributes=None):
if ":lambda:" in arn:
from localstack.services.awslambda import lambda_api
lambda_api.run_lambda(event=event, context={}, func_arn=arn)
elif ":sns:" in arn:
sns_client = connect_to_service("sns")
sns_client.publish(TopicArn=arn, Message=json.dumps(event))
elif ":sqs:" in arn:
sqs_client = connect_to_service("sqs")
queue_url = get_sqs_queue_url(arn)
msg_group_id = (target_attributes or {}).get("MessageGroupId")
kwargs = {"MessageGroupId": msg_group_id} if msg_group_id else {}
sqs_client.send_message(
QueueUrl=queue_url, MessageBody=json.dumps(event), **kwargs
)
elif ":states" in arn:
stepfunctions_client = connect_to_service("stepfunctions")
stepfunctions_client.start_execution(
stateMachineArn=arn, input=json.dumps(event)
)
else:
LOG.info('Unsupported Events rule target ARN "%s"' % arn)
|
def send_event_to_target(arn, event):
if ":lambda:" in arn:
from localstack.services.awslambda import lambda_api
lambda_api.run_lambda(event=event, context={}, func_arn=arn)
elif ":sns:" in arn:
sns_client = connect_to_service("sns")
sns_client.publish(TopicArn=arn, Message=json.dumps(event))
elif ":sqs:" in arn:
sqs_client = connect_to_service("sqs")
queue_url = get_sqs_queue_url(arn)
sqs_client.send_message(QueueUrl=queue_url, MessageBody=json.dumps(event))
elif ":states" in arn:
stepfunctions_client = connect_to_service("stepfunctions")
stepfunctions_client.start_execution(
stateMachineArn=arn, input=json.dumps(event)
)
else:
LOG.info('Unsupported Events rule target ARN "%s"' % arn)
|
https://github.com/localstack/localstack/issues/3402
|
2020-12-29T04:01:46:WARNING:bootstrap.py: Thread run method <function get_scheduled_rule_func.<locals>.func at 0x7f0ceb256e50>(None) failed: Traceback (most recent call last):
File "/opt/code/localstack/localstack/utils/bootstrap.py", line 514, in run
self.func(self.params)
File "/opt/code/localstack/localstack/services/events/events_listener.py", line 75, in func
sqs_client.send_message(QueueUrl=queue_url, MessageBody=json.dumps(event))
File "/opt/code/localstack/.venv/lib/python3.8/site-packages/botocore/client.py", line 316, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/opt/code/localstack/.venv/lib/python3.8/site-packages/botocore/client.py", line 635, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (MissingParameter) when calling the SendMessage operation: The request must contain the parameter MessageGroupId.
|
botocore.exceptions.ClientError
|
def __init__(self):
super(LambdaExecutorSeparateContainers, self).__init__()
self.max_port = LAMBDA_API_UNIQUE_PORTS
self.port_offset = LAMBDA_API_PORT_OFFSET
|
def __init__(self):
super(LambdaExecutorSeparateContainers, self).__init__()
self.next_port = 1
self.max_port = LAMBDA_API_UNIQUE_PORTS
self.port_offset = LAMBDA_API_PORT_OFFSET
|
https://github.com/localstack/localstack/issues/1892
|
2019-12-20T09:45:49:DEBUG:localstack.services.awslambda.lambda_executors: Using entrypoint "/var/rapid/init --bootstrap /var/runtime/bootstrap" for container "localstack_lambda_arn_aws_lambda_us-east-1_000000000000_function_rb-files-hub-service-dev-rb-files-hub" on network "host".
2019-12-20T09:45:49:DEBUG:localstack.services.awslambda.lambda_executors: Command for docker-reuse Lambda executor: docker exec -e NODE_ENV="$NODE_ENV" -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY" -e HOSTNAME="$HOSTNAME" -e LOCALSTACK_HOSTNAME="$LOCALSTACK_HOSTNAME" -e AWS_LAMBDA_FUNCTION_NAME="$AWS_LAMBDA_FUNCTION_NAME" -e AWS_LAMBDA_FUNCTION_VERSION="$AWS_LAMBDA_FUNCTION_VERSION" -e AWS_LAMBDA_FUNCTION_INVOKED_ARN="$AWS_LAMBDA_FUNCTION_INVOKED_ARN" -e _LAMBDA_SERVER_PORT="$_LAMBDA_SERVER_PORT" localstack_lambda_arn_aws_lambda_us-east-1_000000000000_function_rb-files-hub-service-dev-rb-files-hub /var/rapid/init --bootstrap /var/runtime/bootstrap index.handler
2019-12-20T09:45:49:DEBUG:localstack.services.awslambda.lambda_executors: Running lambda cmd: docker exec -e NODE_ENV="$NODE_ENV" -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY" -e HOSTNAME="$HOSTNAME" -e LOCALSTACK_HOSTNAME="$LOCALSTACK_HOSTNAME" -e AWS_LAMBDA_FUNCTION_NAME="$AWS_LAMBDA_FUNCTION_NAME" -e AWS_LAMBDA_FUNCTION_VERSION="$AWS_LAMBDA_FUNCTION_VERSION" -e AWS_LAMBDA_FUNCTION_INVOKED_ARN="$AWS_LAMBDA_FUNCTION_INVOKED_ARN" -e _LAMBDA_SERVER_PORT="$_LAMBDA_SERVER_PORT" localstack_lambda_arn_aws_lambda_us-east-1_000000000000_function_rb-files-hub-service-dev-rb-files-hub /var/rapid/init --bootstrap /var/runtime/bootstrap index.handler
2019-12-20T09:45:49:WARNING:bootstrap.py: Thread run method <function LambdaExecutor.execute.<locals>.do_execute at 0x7f59b11db4d0>(None) failed: Traceback (most recent call last):
File "/opt/code/localstack/localstack/utils/bootstrap.py", line 413, in run
self.func(self.params)
File "/opt/code/localstack/localstack/services/awslambda/lambda_executors.py", line 70, in do_execute
result, log_output = self._execute(func_arn, func_details, event, context, version)
File "/opt/code/localstack/localstack/services/awslambda/lambda_executors.py", line 241, in _execute
result, log_output = self.run_lambda_executor(cmd, stdin, environment)
File "/opt/code/localstack/localstack/services/awslambda/lambda_executors.py", line 161, in run_lambda_executor
(return_code, result, log_output))
Exception: Lambda process returned error status code: 1. Result: . Output:
2019/12/20 09:45:49 listen tcp :9001: bind: address already in use
|
Exception
|
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
entrypoint = ""
if command:
entrypoint = ' --entrypoint ""'
else:
command = '"%s"' % handler
# add Docker Lambda env vars
network = config.LAMBDA_DOCKER_NETWORK
network_str = '--network="%s"' % network if network else ""
if network == "host":
port = get_free_tcp_port()
env_vars["DOCKER_LAMBDA_API_PORT"] = port
env_vars["DOCKER_LAMBDA_RUNTIME_PORT"] = port
env_vars_string = " ".join(
['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()]
)
debug_docker_java_port = (
"-p {p}:{p}".format(p=Util.debug_java_port) if Util.debug_java_port else ""
)
docker_cmd = self._docker_cmd()
docker_image = Util.docker_image_for_runtime(runtime)
rm_flag = Util.get_docker_remove_flag()
if config.LAMBDA_REMOTE_DOCKER:
cmd = (
'CONTAINER_ID="$(%s create -i'
" %s" # entrypoint
" %s" # debug_docker_java_port
" %s" # env
" %s" # network
" %s" # --rm flag
" %s %s" # image and command
')";'
'%s cp "%s/." "$CONTAINER_ID:/var/task"; '
'%s start -ai "$CONTAINER_ID";'
) % (
docker_cmd,
entrypoint,
debug_docker_java_port,
env_vars_string,
network_str,
rm_flag,
docker_image,
command,
docker_cmd,
lambda_cwd,
docker_cmd,
)
else:
lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
cmd = (
"%s run -i"
' %s -v "%s":/var/task'
" %s"
" %s" # network
" %s" # --rm flag
" %s %s"
) % (
docker_cmd,
entrypoint,
lambda_cwd_on_host,
env_vars_string,
network_str,
rm_flag,
docker_image,
command,
)
return cmd
|
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
entrypoint = ""
if command:
entrypoint = ' --entrypoint ""'
else:
command = '"%s"' % handler
# add Docker Lambda env vars
network = config.LAMBDA_DOCKER_NETWORK
network_str = '--network="%s"' % network if network else ""
if network == "host":
port = str(self.next_port + self.port_offset)
env_vars["DOCKER_LAMBDA_API_PORT"] = port
env_vars["DOCKER_LAMBDA_RUNTIME_PORT"] = port
self.next_port = (self.next_port + 1) % self.max_port
env_vars_string = " ".join(
['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()]
)
debug_docker_java_port = (
"-p {p}:{p}".format(p=Util.debug_java_port) if Util.debug_java_port else ""
)
docker_cmd = self._docker_cmd()
docker_image = Util.docker_image_for_runtime(runtime)
rm_flag = Util.get_docker_remove_flag()
if config.LAMBDA_REMOTE_DOCKER:
cmd = (
'CONTAINER_ID="$(%s create -i'
" %s" # entrypoint
" %s" # debug_docker_java_port
" %s" # env
" %s" # network
" %s" # --rm flag
" %s %s" # image and command
')";'
'%s cp "%s/." "$CONTAINER_ID:/var/task"; '
'%s start -ai "$CONTAINER_ID";'
) % (
docker_cmd,
entrypoint,
debug_docker_java_port,
env_vars_string,
network_str,
rm_flag,
docker_image,
command,
docker_cmd,
lambda_cwd,
docker_cmd,
)
else:
lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
cmd = (
"%s run -i"
' %s -v "%s":/var/task'
" %s"
" %s" # network
" %s" # --rm flag
" %s %s"
) % (
docker_cmd,
entrypoint,
lambda_cwd_on_host,
env_vars_string,
network_str,
rm_flag,
docker_image,
command,
)
return cmd
|
https://github.com/localstack/localstack/issues/1892
|
2019-12-20T09:45:49:DEBUG:localstack.services.awslambda.lambda_executors: Using entrypoint "/var/rapid/init --bootstrap /var/runtime/bootstrap" for container "localstack_lambda_arn_aws_lambda_us-east-1_000000000000_function_rb-files-hub-service-dev-rb-files-hub" on network "host".
2019-12-20T09:45:49:DEBUG:localstack.services.awslambda.lambda_executors: Command for docker-reuse Lambda executor: docker exec -e NODE_ENV="$NODE_ENV" -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY" -e HOSTNAME="$HOSTNAME" -e LOCALSTACK_HOSTNAME="$LOCALSTACK_HOSTNAME" -e AWS_LAMBDA_FUNCTION_NAME="$AWS_LAMBDA_FUNCTION_NAME" -e AWS_LAMBDA_FUNCTION_VERSION="$AWS_LAMBDA_FUNCTION_VERSION" -e AWS_LAMBDA_FUNCTION_INVOKED_ARN="$AWS_LAMBDA_FUNCTION_INVOKED_ARN" -e _LAMBDA_SERVER_PORT="$_LAMBDA_SERVER_PORT" localstack_lambda_arn_aws_lambda_us-east-1_000000000000_function_rb-files-hub-service-dev-rb-files-hub /var/rapid/init --bootstrap /var/runtime/bootstrap index.handler
2019-12-20T09:45:49:DEBUG:localstack.services.awslambda.lambda_executors: Running lambda cmd: docker exec -e NODE_ENV="$NODE_ENV" -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY" -e HOSTNAME="$HOSTNAME" -e LOCALSTACK_HOSTNAME="$LOCALSTACK_HOSTNAME" -e AWS_LAMBDA_FUNCTION_NAME="$AWS_LAMBDA_FUNCTION_NAME" -e AWS_LAMBDA_FUNCTION_VERSION="$AWS_LAMBDA_FUNCTION_VERSION" -e AWS_LAMBDA_FUNCTION_INVOKED_ARN="$AWS_LAMBDA_FUNCTION_INVOKED_ARN" -e _LAMBDA_SERVER_PORT="$_LAMBDA_SERVER_PORT" localstack_lambda_arn_aws_lambda_us-east-1_000000000000_function_rb-files-hub-service-dev-rb-files-hub /var/rapid/init --bootstrap /var/runtime/bootstrap index.handler
2019-12-20T09:45:49:WARNING:bootstrap.py: Thread run method <function LambdaExecutor.execute.<locals>.do_execute at 0x7f59b11db4d0>(None) failed: Traceback (most recent call last):
File "/opt/code/localstack/localstack/utils/bootstrap.py", line 413, in run
self.func(self.params)
File "/opt/code/localstack/localstack/services/awslambda/lambda_executors.py", line 70, in do_execute
result, log_output = self._execute(func_arn, func_details, event, context, version)
File "/opt/code/localstack/localstack/services/awslambda/lambda_executors.py", line 241, in _execute
result, log_output = self.run_lambda_executor(cmd, stdin, environment)
File "/opt/code/localstack/localstack/services/awslambda/lambda_executors.py", line 161, in run_lambda_executor
(return_code, result, log_output))
Exception: Lambda process returned error status code: 1. Result: . Output:
2019/12/20 09:45:49 listen tcp :9001: bind: address already in use
|
Exception
|
def forward_request(self, method, path, data, headers):
if method == "OPTIONS":
return 200
if path.split("?")[0] == "/health":
return serve_health_endpoint(method, path, data)
# kill the process if we receive this header
headers.get(HEADER_KILL_SIGNAL) and os._exit(0)
target = headers.get("x-amz-target", "")
auth_header = headers.get("authorization", "")
host = headers.get("host", "")
headers[HEADER_LOCALSTACK_EDGE_URL] = "https://%s" % host
# extract API details
api, port, path, host = get_api_from_headers(headers, path)
if port and int(port) < 0:
return 404
if not port:
port = get_port_from_custom_rules(method, path, data, headers) or port
if not port:
if api in ["", None, "_unknown_"]:
truncated = truncate(data)
LOG.info(
(
'Unable to find forwarding rule for host "%s", path "%s", '
'target header "%s", auth header "%s", data "%s"'
)
% (host, path, target, auth_header, truncated)
)
else:
LOG.info(
(
'Unable to determine forwarding port for API "%s" - please '
"make sure this API is enabled via the SERVICES configuration"
)
% api
)
response = Response()
response.status_code = 404
response._content = '{"status": "running"}'
return response
use_ssl = config.USE_SSL
connect_host = "%s:%s" % (config.HOSTNAME, port)
url = "http%s://%s%s" % ("s" if use_ssl else "", connect_host, path)
headers["Host"] = host
function = getattr(requests, method.lower())
if isinstance(data, dict):
data = json.dumps(data)
response = function(url, data=data, headers=headers, verify=False)
return response
|
def forward_request(self, method, path, data, headers):
if method == "OPTIONS":
return 200
# kill the process if we receive this header
headers.get(HEADER_KILL_SIGNAL) and os._exit(0)
target = headers.get("x-amz-target", "")
auth_header = headers.get("authorization", "")
host = headers.get("host", "")
headers[HEADER_LOCALSTACK_EDGE_URL] = "https://%s" % host
# extract API details
api, port, path, host = get_api_from_headers(headers, path)
if port and int(port) < 0:
return 404
if not port:
port = get_port_from_custom_rules(method, path, data, headers) or port
if not port:
if api in ["", None, "_unknown_"]:
truncated = truncate(data)
LOG.info(
(
'Unable to find forwarding rule for host "%s", path "%s", '
'target header "%s", auth header "%s", data "%s"'
)
% (host, path, target, auth_header, truncated)
)
else:
LOG.info(
(
'Unable to determine forwarding port for API "%s" - please '
"make sure this API is enabled via the SERVICES configuration"
)
% api
)
response = Response()
response.status_code = 404
response._content = '{"status": "running"}'
return response
use_ssl = config.USE_SSL
connect_host = "%s:%s" % (config.HOSTNAME, port)
url = "http%s://%s%s" % ("s" if use_ssl else "", connect_host, path)
headers["Host"] = host
function = getattr(requests, method.lower())
if isinstance(data, dict):
data = json.dumps(data)
response = function(url, data=data, headers=headers, verify=False)
return response
|
https://github.com/localstack/localstack/issues/2534
|
2020-06-09T11:04:42:ERROR:localstack.services.dynamodb.dynamodb_starter: DynamoDB health check failed: an integer is required (got type NoneType) Traceback (most recent call last):
File "/opt/code/localstack/localstack/services/dynamodb/dynamodb_starter.py", line 23, in check_dynamodb
wait_for_port_open(PORT_DYNAMODB_BACKEND, http_path='/', expect_success=False, sleep_time=1)
File "/opt/code/localstack/localstack/utils/common.py", line 412, in wait_for_port_open
return retry(check, sleep=sleep_time, retries=retries)
File "/opt/code/localstack/localstack/utils/common.py", line 460, in retry
raise raise_error
File "/opt/code/localstack/localstack/utils/common.py", line 456, in retry
return function(**kwargs)
File "/opt/code/localstack/localstack/utils/common.py", line 409, in check
if not is_port_open(port, http_path=http_path, expect_success=expect_success):
File "/opt/code/localstack/localstack/utils/common.py", line 392, in is_port_open
result = sock.connect_ex((host, port))
TypeError: an integer is required (got type NoneType)
|
TypeError
|
def record_service_health(api, status):
data = {api: status}
health_url = "%s://%s:%s/health" % (
get_service_protocol(),
config.LOCALHOST,
config.EDGE_PORT,
)
try:
requests.put(health_url, data=json.dumps(data))
except Exception:
# ignore for now, if the service is not running
pass
|
def record_service_health(api, status):
data = {api: status}
health_url = "%s://%s:%s/health" % (
get_service_protocol(),
config.LOCALHOST,
config.PORT_WEB_UI,
)
try:
requests.put(health_url, data=json.dumps(data))
except Exception:
# ignore for now, if the service is not running
pass
|
https://github.com/localstack/localstack/issues/2534
|
2020-06-09T11:04:42:ERROR:localstack.services.dynamodb.dynamodb_starter: DynamoDB health check failed: an integer is required (got type NoneType) Traceback (most recent call last):
File "/opt/code/localstack/localstack/services/dynamodb/dynamodb_starter.py", line 23, in check_dynamodb
wait_for_port_open(PORT_DYNAMODB_BACKEND, http_path='/', expect_success=False, sleep_time=1)
File "/opt/code/localstack/localstack/utils/common.py", line 412, in wait_for_port_open
return retry(check, sleep=sleep_time, retries=retries)
File "/opt/code/localstack/localstack/utils/common.py", line 460, in retry
raise raise_error
File "/opt/code/localstack/localstack/utils/common.py", line 456, in retry
return function(**kwargs)
File "/opt/code/localstack/localstack/utils/common.py", line 409, in check
if not is_port_open(port, http_path=http_path, expect_success=expect_success):
File "/opt/code/localstack/localstack/utils/common.py", line 392, in is_port_open
result = sock.connect_ex((host, port))
TypeError: an integer is required (got type NoneType)
|
TypeError
|
def apply_patches():
"""Apply patches to make LocalStack seamlessly interact with the moto backend.
TODO: Eventually, these patches should be contributed to the upstream repo!"""
# add model mappings to moto
parsing.MODEL_MAP.update(MODEL_MAP)
# fix account ID
parsing.ACCOUNT_ID = TEST_AWS_ACCOUNT_ID
# Patch clean_json in moto
def clean_json(resource_json, resources_map):
result = clean_json_orig(resource_json, resources_map)
if isinstance(result, BaseModel):
if isinstance(resource_json, dict) and "Ref" in resource_json:
entity_id = get_entity_id(result, resource_json)
if entity_id:
return entity_id
LOG.warning(
'Unable to resolve "Ref" attribute for: %s - %s - %s',
resource_json,
result,
type(result),
)
return result
clean_json_orig = parsing.clean_json
parsing.clean_json = clean_json
# Patch parse_and_create_resource method in moto to deploy resources in LocalStack
def parse_and_create_resource(
logical_id, resource_json, resources_map, region_name, force_create=False
):
try:
return _parse_and_create_resource(
logical_id,
resource_json,
resources_map,
region_name,
force_create=force_create,
)
except Exception as e:
LOG.error(
'Unable to parse and create resource "%s": %s %s'
% (logical_id, e, traceback.format_exc())
)
raise
def parse_and_update_resource(
logical_id, resource_json, resources_map, region_name
):
try:
return _parse_and_create_resource(
logical_id, resource_json, resources_map, region_name, update=True
)
except Exception as e:
LOG.error(
'Unable to parse and update resource "%s": %s %s'
% (logical_id, e, traceback.format_exc())
)
raise
def _parse_and_create_resource(
logical_id,
resource_json,
resources_map,
region_name,
update=False,
force_create=False,
):
stack_name = resources_map.get("AWS::StackName")
resource_hash_key = (stack_name, logical_id)
props = resource_json["Properties"] = resource_json.get("Properties") or {}
# If the current stack is being updated, avoid infinite recursion
updating = CURRENTLY_UPDATING_RESOURCES.get(resource_hash_key)
LOG.debug(
"Currently processing stack resource %s/%s: %s"
% (stack_name, logical_id, updating)
)
if updating:
return None
# parse and get final resource JSON
resource_tuple = parsing.parse_resource(
logical_id, resource_json, resources_map
)
if not resource_tuple:
return None
_, resource_json, _ = resource_tuple
def add_default_props(resource_props):
"""apply some fixes which otherwise cause deployments to fail"""
res_type = resource_props["Type"]
props = resource_props.get("Properties", {})
if res_type == "AWS::Lambda::EventSourceMapping" and not props.get(
"StartingPosition"
):
props["StartingPosition"] = "LATEST"
# generate default names for certain resource types
default_attrs = (
("AWS::IAM::Role", "RoleName"),
("AWS::Events::Rule", "Name"),
)
for entry in default_attrs:
if res_type == entry[0] and not props.get(entry[1]):
props[entry[1]] = "cf-%s-%s" % (
stack_name,
md5(canonical_json(props)),
)
# add some fixes and default props which otherwise cause deployments to fail
add_default_props(resource_json)
for resource in resources_map._resource_json_map.values():
add_default_props(resource)
# check if this resource already exists in the resource map
resource = resources_map._parsed_resources.get(logical_id)
if resource and not update and not force_create:
return resource
# fix resource ARNs, make sure to convert account IDs 000000000000 to 123456789012
resource_json_arns_fixed = clone(json_safe(convert_objs_to_ids(resource_json)))
set_moto_account_ids(resource_json_arns_fixed)
# create resource definition and store CloudFormation metadata in moto
moto_create_error = None
if (resource or update) and not force_create:
parse_and_update_resource_orig(
logical_id, resource_json_arns_fixed, resources_map, region_name
)
elif not resource:
try:
resource = parse_and_create_resource_orig(
logical_id, resource_json_arns_fixed, resources_map, region_name
)
if not resource:
# this can happen if the resource has an associated Condition which evaluates to false
return resource
resource.logical_id = logical_id
except Exception as e:
moto_create_error = e
# check whether this resource needs to be deployed
resource_map_new = dict(resources_map._resource_json_map)
resource_map_new[logical_id] = resource_json
should_be_created = template_deployer.should_be_deployed(
logical_id, resource_map_new, stack_name
)
# check for moto creation errors and raise an exception if needed
if moto_create_error:
if should_be_created:
raise moto_create_error
else:
LOG.info(
"Error on moto CF resource creation. Ignoring, as should_be_created=%s: %s"
% (should_be_created, moto_create_error)
)
# Fix for moto which sometimes hard-codes region name as 'us-east-1'
if hasattr(resource, "region_name") and resource.region_name != region_name:
LOG.debug(
"Updating incorrect region from %s to %s"
% (resource.region_name, region_name)
)
resource.region_name = region_name
# check whether this resource needs to be deployed
is_updateable = False
if not should_be_created:
# This resource is either not deployable or already exists. Check if it can be updated
is_updateable = template_deployer.is_updateable(
logical_id, resource_map_new, stack_name
)
if not update or not is_updateable:
all_satisfied = template_deployer.all_resource_dependencies_satisfied(
logical_id, resource_map_new, stack_name
)
if not all_satisfied:
LOG.info(
"Resource %s cannot be deployed, found unsatisfied dependencies. %s"
% (logical_id, resource_json)
)
details = [logical_id, resource_json, resources_map, region_name]
resources_map._unresolved_resources = getattr(
resources_map, "_unresolved_resources", {}
)
resources_map._unresolved_resources[logical_id] = details
else:
LOG.debug(
"Resource %s need not be deployed (is_updateable=%s): %s %s"
% (logical_id, is_updateable, resource_json, bool(resource))
)
# Return if this resource already exists and can/need not be updated yet
# NOTE: We should always return the resource here, to avoid duplicate
# creation of resources in moto!
return resource
# Apply some fixes/patches to the resource names, then deploy resource in LocalStack
update_resource_name(resource, resource_json)
LOG.debug(
"Deploying CloudFormation resource (update=%s, exists=%s, updateable=%s): %s"
% (update, not should_be_created, is_updateable, resource_json)
)
try:
CURRENTLY_UPDATING_RESOURCES[resource_hash_key] = True
deploy_func = (
template_deployer.update_resource
if update
else template_deployer.deploy_resource
)
result = deploy_func(logical_id, resource_map_new, stack_name=stack_name)
finally:
CURRENTLY_UPDATING_RESOURCES[resource_hash_key] = False
if not should_be_created:
# skip the parts below for update requests
return resource
def find_id(resource):
"""Find ID of the given resource."""
if not resource:
return
for id_attr in (
"Id",
"id",
"ResourceId",
"RestApiId",
"DeploymentId",
"RoleId",
):
if id_attr in resource:
return resource[id_attr]
# update resource IDs to avoid mismatch between CF moto and LocalStack backend resources
if hasattr(resource, "id") or (
isinstance(resource, dict) and resource.get("id")
):
existing_id = resource.id if hasattr(resource, "id") else resource["id"]
new_res_id = find_id(result)
LOG.debug(
"Updating resource id: %s - %s, %s - %s"
% (existing_id, new_res_id, resource, resource_json)
)
if new_res_id:
LOG.info(
"Updating resource ID from %s to %s (%s)"
% (existing_id, new_res_id, region_name)
)
update_resource_id(
resource,
new_res_id,
props,
region_name,
stack_name,
resources_map._resource_json_map,
)
else:
LOG.warning(
"Unable to extract id for resource %s: %s" % (logical_id, result)
)
# update physical_resource_id field
update_physical_resource_id(resource)
return resource
def update_resource_id(
resource, new_id, props, region_name, stack_name, resource_map
):
"""Update and fix the ID(s) of the given resource."""
# NOTE: this is a bit of a hack, which is required because
# of the order of events when CloudFormation resources are created.
# When we process a request to create a CF resource that's part of a
# stack, say, an API Gateway Resource, then we (1) create the object
# in memory in moto, which generates a random ID for the resource, and
# (2) create the actual resource in the backend service using
# template_deployer.deploy_resource(..) (see above).
# The resource created in (2) now has a different ID than the resource
# created in (1), which leads to downstream problems. Hence, we need
# the logic below to reconcile the ids, i.e., apply IDs from (2) to (1).
backend = apigw_models.apigateway_backends[region_name]
if isinstance(resource, apigw_models.RestAPI):
backend.apis.pop(resource.id, None)
backend.apis[new_id] = resource
# We also need to fetch the resources to replace the root resource
# that moto automatically adds to newly created RestAPI objects
client = aws_stack.connect_to_service("apigateway")
resources = client.get_resources(restApiId=new_id, limit=500)["items"]
# make sure no resources have been added in addition to the root /
assert len(resource.resources) == 1
resource.resources = {}
for res in resources:
res_path_part = res.get("pathPart") or res.get("path")
child = resource.add_child(res_path_part, res.get("parentId"))
resource.resources.pop(child.id)
child.id = res["id"]
child.api_id = new_id
resource.resources[child.id] = child
resource.id = new_id
elif isinstance(resource, apigw_models.Resource):
api_id = props["RestApiId"]
api_id = template_deployer.resolve_refs_recursively(
stack_name, api_id, resource_map
)
backend.apis[api_id].resources.pop(resource.id, None)
backend.apis[api_id].resources[new_id] = resource
resource.id = new_id
elif isinstance(resource, apigw_models.Deployment):
api_id = props["RestApiId"]
api_id = template_deployer.resolve_refs_recursively(
stack_name, api_id, resource_map
)
backend.apis[api_id].deployments.pop(resource["id"], None)
backend.apis[api_id].deployments[new_id] = resource
resource["id"] = new_id
else:
LOG.warning(
"Unexpected resource type when updating ID: %s" % type(resource)
)
parse_and_create_resource_orig = parsing.parse_and_create_resource
parsing.parse_and_create_resource = parse_and_create_resource
parse_and_update_resource_orig = parsing.parse_and_update_resource
parsing.parse_and_update_resource = parse_and_update_resource
# patch CloudFormation parse_output(..) method to fix a bug in moto
def parse_output(output_logical_id, output_json, resources_map):
try:
result = parse_output_orig(output_logical_id, output_json, resources_map)
except KeyError:
result = Output()
result.key = output_logical_id
result.value = None
result.description = output_json.get("Description")
# Make sure output includes export name
if not hasattr(result, "export_name"):
result.export_name = output_json.get("Export", {}).get("Name")
return result
parse_output_orig = parsing.parse_output
parsing.parse_output = parse_output
# Make sure the export name is returned for stack outputs
if "<ExportName>" not in responses.DESCRIBE_STACKS_TEMPLATE:
find = "</OutputValue>"
replace = """</OutputValue>
{% if output.export_name %}
<ExportName>{{ output.export_name }}</ExportName>
{% endif %}
"""
responses.DESCRIBE_STACKS_TEMPLATE = responses.DESCRIBE_STACKS_TEMPLATE.replace(
find, replace
)
# Patch CloudFormationBackend.update_stack method in moto
def make_cf_update_stack(cf_backend):
cf_update_stack_orig = cf_backend.update_stack
def cf_update_stack(self, *args, **kwargs):
stack = cf_update_stack_orig(*args, **kwargs)
# update stack exports
self._validate_export_uniqueness(stack)
for export in stack.exports:
self.exports[export.name] = export
return stack
return types.MethodType(cf_update_stack, cf_backend)
for region, cf_backend in cloudformation_backends.items():
cf_backend.update_stack = make_cf_update_stack(cf_backend)
# Patch DynamoDB get_cfn_attribute(..) method in moto
def DynamoDB_Table_get_cfn_attribute(self, attribute_name):
try:
return ddb_table_get_cfn_attribute_orig(self, attribute_name)
except Exception:
if attribute_name == "Arn":
return aws_stack.dynamodb_table_arn(table_name=self.name)
raise
ddb_table_get_cfn_attribute_orig = dynamodb_models.Table.get_cfn_attribute
dynamodb_models.Table.get_cfn_attribute = DynamoDB_Table_get_cfn_attribute
# Patch generate_stack_id(..) method in moto
def generate_stack_id(stack_name, region=None, **kwargs):
region = region or aws_stack.get_region()
return generate_stack_id_orig(stack_name, region=region, **kwargs)
generate_stack_id_orig = cloudformation_utils.generate_stack_id
cloudformation_utils.generate_stack_id = cloudformation_models.generate_stack_id = (
generate_stack_id
)
# Patch DynamoDB get_cfn_attribute(..) method in moto
def DynamoDB2_Table_get_cfn_attribute(self, attribute_name):
if attribute_name == "Arn":
return aws_stack.dynamodb_table_arn(table_name=self.name)
elif attribute_name == "StreamArn":
if (self.stream_specification or {}).get("StreamEnabled"):
return aws_stack.dynamodb_stream_arn(self.name, "latest")
return None
raise UnformattedGetAttTemplateException()
dynamodb2_models.Table.get_cfn_attribute = DynamoDB2_Table_get_cfn_attribute
# Patch SQS get_cfn_attribute(..) method in moto
def SQS_Queue_get_cfn_attribute(self, attribute_name):
if attribute_name in ["Arn", "QueueArn"]:
return aws_stack.sqs_queue_arn(queue_name=self.name)
return SQS_Queue_get_cfn_attribute_orig(self, attribute_name)
SQS_Queue_get_cfn_attribute_orig = sqs_models.Queue.get_cfn_attribute
sqs_models.Queue.get_cfn_attribute = SQS_Queue_get_cfn_attribute
# Patch S3 Bucket get_cfn_attribute(..) method in moto
def S3_Bucket_get_cfn_attribute(self, attribute_name):
if attribute_name in ["Arn"]:
return aws_stack.s3_bucket_arn(self.name)
return S3_Bucket_get_cfn_attribute_orig(self, attribute_name)
S3_Bucket_get_cfn_attribute_orig = s3_models.FakeBucket.get_cfn_attribute
s3_models.FakeBucket.get_cfn_attribute = S3_Bucket_get_cfn_attribute
# Patch SQS physical_resource_id(..) method in moto
@property
def SQS_Queue_physical_resource_id(self):
result = SQS_Queue_physical_resource_id_orig.fget(self)
if "://" not in result:
# convert ID to queue URL
return aws_stack.get_sqs_queue_url(result)
return result
SQS_Queue_physical_resource_id_orig = sqs_models.Queue.physical_resource_id
sqs_models.Queue.physical_resource_id = SQS_Queue_physical_resource_id
# Patch LogGroup get_cfn_attribute(..) method in moto
def LogGroup_get_cfn_attribute(self, attribute_name):
try:
return LogGroup_get_cfn_attribute_orig(self, attribute_name)
except Exception:
if attribute_name == "Arn":
return aws_stack.log_group_arn(self.name)
raise
LogGroup_get_cfn_attribute_orig = getattr(
cw_models.LogGroup, "get_cfn_attribute", None
)
cw_models.LogGroup.get_cfn_attribute = LogGroup_get_cfn_attribute
# Patch Lambda get_cfn_attribute(..) method in moto
def Lambda_Function_get_cfn_attribute(self, attribute_name):
try:
if attribute_name == "Arn":
return self.function_arn
return Lambda_Function_get_cfn_attribute_orig(self, attribute_name)
except Exception:
if attribute_name in ("Name", "FunctionName"):
return self.function_name
raise
Lambda_Function_get_cfn_attribute_orig = (
lambda_models.LambdaFunction.get_cfn_attribute
)
lambda_models.LambdaFunction.get_cfn_attribute = Lambda_Function_get_cfn_attribute
# Patch DynamoDB get_cfn_attribute(..) method in moto
def DynamoDB_Table_get_cfn_attribute(self, attribute_name):
try:
if attribute_name == "StreamArn":
streams = aws_stack.connect_to_service("dynamodbstreams").list_streams(
TableName=self.name
)["Streams"]
return streams[0]["StreamArn"] if streams else None
return DynamoDB_Table_get_cfn_attribute_orig(self, attribute_name)
except Exception as e:
LOG.warning(
'Unable to get attribute "%s" from resource %s: %s'
% (attribute_name, type(self), e)
)
raise
DynamoDB_Table_get_cfn_attribute_orig = dynamodb_models.Table.get_cfn_attribute
dynamodb_models.Table.get_cfn_attribute = DynamoDB_Table_get_cfn_attribute
# Patch IAM get_cfn_attribute(..) method in moto
def IAM_Role_get_cfn_attribute(self, attribute_name):
try:
return IAM_Role_get_cfn_attribute_orig(self, attribute_name)
except Exception:
if attribute_name == "Arn":
return aws_stack.role_arn(self.name)
raise
IAM_Role_get_cfn_attribute_orig = iam_models.Role.get_cfn_attribute
iam_models.Role.get_cfn_attribute = IAM_Role_get_cfn_attribute
# Patch IAM Role model
# https://github.com/localstack/localstack/issues/925
@property
def IAM_Role_physical_resource_id(self):
return self.name
iam_models.Role.physical_resource_id = IAM_Role_physical_resource_id
# Patch SNS Topic get_cfn_attribute(..) method in moto
def SNS_Topic_get_cfn_attribute(self, attribute_name):
result = SNS_Topic_get_cfn_attribute_orig(self, attribute_name)
if attribute_name.lower() in ["arn", "topicarn"]:
result = aws_stack.fix_account_id_in_arns(result)
return result
SNS_Topic_get_cfn_attribute_orig = sns_models.Topic.get_cfn_attribute
sns_models.Topic.get_cfn_attribute = SNS_Topic_get_cfn_attribute
# Patch ES get_cfn_attribute(..) method
def ES_get_cfn_attribute(self, attribute_name):
if attribute_name in ["Arn", "DomainArn"]:
return aws_stack.es_domain_arn(self.params.get("DomainName"))
if attribute_name == "DomainEndpoint":
if not hasattr(self, "_domain_endpoint"):
es_details = aws_stack.connect_to_service(
"es"
).describe_elasticsearch_domain(
DomainName=self.params.get("DomainName")
)
self._domain_endpoint = es_details["DomainStatus"]["Endpoint"]
return self._domain_endpoint
raise UnformattedGetAttTemplateException()
service_models.ElasticsearchDomain.get_cfn_attribute = ES_get_cfn_attribute
# Patch Firehose get_cfn_attribute(..) method
def Firehose_get_cfn_attribute(self, attribute_name):
if attribute_name == "Arn":
return aws_stack.firehose_stream_arn(self.params.get("DeliveryStreamName"))
raise UnformattedGetAttTemplateException()
service_models.FirehoseDeliveryStream.get_cfn_attribute = Firehose_get_cfn_attribute
# Patch LambdaFunction create_from_cloudformation_json(..) method in moto
@classmethod
def Lambda_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
resource_name = (
cloudformation_json.get("Properties", {}).get("FunctionName")
or resource_name
)
return Lambda_create_from_cloudformation_json_orig(
resource_name, cloudformation_json, region_name
)
Lambda_create_from_cloudformation_json_orig = (
lambda_models.LambdaFunction.create_from_cloudformation_json
)
lambda_models.LambdaFunction.create_from_cloudformation_json = (
Lambda_create_from_cloudformation_json
)
# Patch EventSourceMapping create_from_cloudformation_json(..) method in moto
@classmethod
def Mapping_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json.get("Properties", {})
func_name = props.get("FunctionName") or ""
if ":lambda:" in func_name:
props["FunctionName"] = aws_stack.lambda_function_name(func_name)
return Mapping_create_from_cloudformation_json_orig(
resource_name, cloudformation_json, region_name
)
Mapping_create_from_cloudformation_json_orig = (
lambda_models.EventSourceMapping.create_from_cloudformation_json
)
lambda_models.EventSourceMapping.create_from_cloudformation_json = (
Mapping_create_from_cloudformation_json
)
# Patch LambdaFunction update_from_cloudformation_json(..) method in moto
@classmethod
def Lambda_update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
resource_name = (
cloudformation_json.get("Properties", {}).get("FunctionName")
or new_resource_name
)
return Lambda_create_from_cloudformation_json_orig(
resource_name, cloudformation_json, region_name
)
if not hasattr(lambda_models.LambdaFunction, "update_from_cloudformation_json"):
lambda_models.LambdaFunction.update_from_cloudformation_json = (
Lambda_update_from_cloudformation_json
)
# Patch Role update_from_cloudformation_json(..) method
@classmethod
def Role_update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
props = cloudformation_json.get("Properties", {})
original_resource.name = props.get("RoleName") or original_resource.name
original_resource.assume_role_policy_document = props.get(
"AssumeRolePolicyDocument"
)
return original_resource
if not hasattr(iam_models.Role, "update_from_cloudformation_json"):
iam_models.Role.update_from_cloudformation_json = (
Role_update_from_cloudformation_json
)
# patch ApiGateway Deployment
def depl_delete_from_cloudformation_json(resource_name, resource_json, region_name):
properties = resource_json["Properties"]
LOG.info(
"TODO: apigateway.Deployment.delete_from_cloudformation_json %s"
% properties
)
if not hasattr(apigw_models.Deployment, "delete_from_cloudformation_json"):
apigw_models.Deployment.delete_from_cloudformation_json = (
depl_delete_from_cloudformation_json
)
# patch Lambda Version
def vers_delete_from_cloudformation_json(resource_name, resource_json, region_name):
properties = resource_json["Properties"]
LOG.info(
"TODO: apigateway.Deployment.delete_from_cloudformation_json %s"
% properties
)
if not hasattr(lambda_models.LambdaVersion, "delete_from_cloudformation_json"):
lambda_models.LambdaVersion.delete_from_cloudformation_json = (
vers_delete_from_cloudformation_json
)
# add CloudFormation types
@classmethod
def RestAPI_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
name = props["Name"]
region_name = props.get("Region") or aws_stack.get_region()
description = props.get("Description") or ""
id = props.get("Id") or short_uid()
return apigw_models.RestAPI(id, region_name, name, description)
def RestAPI_get_cfn_attribute(self, attribute_name):
if attribute_name == "Id":
return self.id
if attribute_name == "Region":
return self.region_name
if attribute_name == "Name":
return self.name
if attribute_name == "Description":
return self.description
if attribute_name == "RootResourceId":
for id, resource in self.resources.items():
if resource.parent_id is None:
return resource.id
return None
raise UnformattedGetAttTemplateException()
@classmethod
def Deployment_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
name = props["StageName"]
deployment_id = props.get("Id") or short_uid()
description = props.get("Description") or ""
return apigw_models.Deployment(deployment_id, name, description)
@classmethod
def Resource_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
region_name = props.get("Region") or aws_stack.get_region()
path_part = props.get("PathPart")
api_id = props.get("RestApiId")
parent_id = props.get("ParentId")
id = props.get("Id") or short_uid()
return apigw_models.Resource(id, region_name, api_id, path_part, parent_id)
@classmethod
def Method_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
method_type = props.get("HttpMethod")
authorization_type = props.get("AuthorizationType")
return apigw_models.Method(method_type, authorization_type)
apigw_models.RestAPI.create_from_cloudformation_json = (
RestAPI_create_from_cloudformation_json
)
apigw_models.RestAPI.get_cfn_attribute = RestAPI_get_cfn_attribute
apigw_models.Deployment.create_from_cloudformation_json = (
Deployment_create_from_cloudformation_json
)
apigw_models.Resource.create_from_cloudformation_json = (
Resource_create_from_cloudformation_json
)
apigw_models.Method.create_from_cloudformation_json = (
Method_create_from_cloudformation_json
)
# TODO: add support for AWS::ApiGateway::Model, AWS::ApiGateway::RequestValidator, ...
# fix AttributeError in moto's CloudFormation describe_stack_resource
def describe_stack_resource(self):
stack_name = self._get_param("StackName")
stack = self.cloudformation_backend.get_stack(stack_name)
logical_resource_id = self._get_param("LogicalResourceId")
if not stack:
msg = 'Unable to find CloudFormation stack "%s" in region %s' % (
stack_name,
aws_stack.get_region(),
)
if aws_stack.get_region() != self.region:
msg = "%s/%s" % (msg, self.region)
LOG.warning(msg)
response = aws_responses.flask_error_response(
msg, code=404, error_type="ResourceNotFoundException"
)
return 404, response.headers, response.data
for stack_resource in stack.stack_resources:
# Note: Line below has been patched
# if stack_resource.logical_resource_id == logical_resource_id:
if (
stack_resource
and stack_resource.logical_resource_id == logical_resource_id
):
resource = stack_resource
break
else:
raise ValidationError(logical_resource_id)
template = self.response_template(
responses.DESCRIBE_STACK_RESOURCE_RESPONSE_TEMPLATE
)
return template.render(stack=stack, resource=resource)
responses.CloudFormationResponse.describe_stack_resource = describe_stack_resource
# fix moto's describe_stack_events jinja2.exceptions.UndefinedError
def cf_describe_stack_events(self):
stack_name = self._get_param("StackName")
backend = self.cloudformation_backend
stack = backend.get_stack(stack_name)
if not stack:
# Also return stack events for deleted stacks, specified by stack name
stack = (
[
stk
for id, stk in backend.deleted_stacks.items()
if stk.name == stack_name
]
or [0]
)[0]
if not stack:
raise ValidationError(
stack_name,
message='Unable to find stack "%s" in region %s'
% (stack_name, aws_stack.get_region()),
)
template = self.response_template(responses.DESCRIBE_STACK_EVENTS_RESPONSE)
return template.render(stack=stack)
responses.CloudFormationResponse.describe_stack_events = cf_describe_stack_events
# fix Lambda regions in moto - see https://github.com/localstack/localstack/issues/1961
for region in boto3.session.Session().get_available_regions("lambda"):
if region not in lambda_models.lambda_backends:
lambda_models.lambda_backends[region] = lambda_models.LambdaBackend(region)
# patch FakeStack.initialize_resources
def run_dependencies_deployment_loop(stack, action):
def set_status(status):
stack._add_stack_event(status)
stack.status = status
def run_loop(*args):
# NOTE: We're adding this additional loop, as it seems that in some cases moto
# does not consider resource dependencies (e.g., if a "DependsOn" resource property
# is defined). This loop allows us to incrementally resolve such dependencies.
resource_map = stack.resource_map
unresolved = {}
for i in range(MAX_DEPENDENCY_DEPTH):
LOG.debug(
"Running CloudFormation stack deployment loop iteration %s"
% (i + 1)
)
unresolved = getattr(resource_map, "_unresolved_resources", {})
if not unresolved:
set_status("%s_COMPLETE" % action)
return resource_map
resource_map._unresolved_resources = {}
for resource_id, resource_details in unresolved.items():
# Re-trigger the resource creation
parse_and_create_resource(*resource_details, force_create=True)
if unresolved.keys() == resource_map._unresolved_resources.keys():
# looks like no more resources can be resolved -> bail
LOG.warning(
"Unresolvable dependencies, there may be undeployed stack resources: %s"
% unresolved
)
break
set_status("%s_FAILED" % action)
raise Exception(
"Unable to resolve all CloudFormation resources after traversing "
+ "dependency tree (maximum depth %s reached): %s"
% (MAX_DEPENDENCY_DEPTH, list(unresolved.keys()))
)
# NOTE: We're running the loop in the background, as it might take some time to complete
FuncThread(run_loop).start()
def initialize_resources(self):
self.resource_map._template = self.resource_map._template or self.template_dict
self.resource_map.load()
self.resource_map.create(self.template_dict)
self.output_map.create()
run_dependencies_deployment_loop(self, "CREATE")
def update(self, *args, **kwargs):
stack_update_orig(self, *args, **kwargs)
run_dependencies_deployment_loop(self, "UPDATE")
FakeStack.initialize_resources = initialize_resources
stack_update_orig = FakeStack.update
FakeStack.update = update
# patch Kinesis Stream get_cfn_attribute(..) method in moto
def Kinesis_Stream_get_cfn_attribute(self, attribute_name):
if attribute_name == "Arn":
return self.arn
raise UnformattedGetAttTemplateException()
kinesis_models.Stream.get_cfn_attribute = Kinesis_Stream_get_cfn_attribute
# patch cloudformation backend create_change_set(..)
# #760 cloudformation deploy invalid xml error
cloudformation_backend_create_change_set_orig = (
CloudFormationBackend.create_change_set
)
def cloudformation_backend_create_change_set(
self,
stack_name,
change_set_name,
template,
parameters,
region_name,
change_set_type,
notification_arns=None,
tags=None,
role_arn=None,
):
change_set_id, _ = cloudformation_backend_create_change_set_orig(
self,
stack_name,
change_set_name,
template,
parameters,
region_name,
change_set_type,
notification_arns,
tags,
role_arn,
)
change_set = self.change_sets[change_set_id]
change_set.status = "CREATE_COMPLETE"
return change_set_id, _
CloudFormationBackend.create_change_set = cloudformation_backend_create_change_set
|
def apply_patches():
"""Apply patches to make LocalStack seamlessly interact with the moto backend.
TODO: Eventually, these patches should be contributed to the upstream repo!"""
# add model mappings to moto
parsing.MODEL_MAP.update(MODEL_MAP)
# Patch clean_json in moto
def clean_json(resource_json, resources_map):
result = clean_json_orig(resource_json, resources_map)
if isinstance(result, BaseModel):
if isinstance(resource_json, dict) and "Ref" in resource_json:
entity_id = get_entity_id(result, resource_json)
if entity_id:
return entity_id
LOG.warning(
'Unable to resolve "Ref" attribute for: %s - %s - %s',
resource_json,
result,
type(result),
)
return result
clean_json_orig = parsing.clean_json
parsing.clean_json = clean_json
# Patch parse_and_create_resource method in moto to deploy resources in LocalStack
def parse_and_create_resource(
logical_id, resource_json, resources_map, region_name, force_create=False
):
try:
return _parse_and_create_resource(
logical_id,
resource_json,
resources_map,
region_name,
force_create=force_create,
)
except Exception as e:
LOG.error(
'Unable to parse and create resource "%s": %s %s'
% (logical_id, e, traceback.format_exc())
)
raise
def parse_and_update_resource(
logical_id, resource_json, resources_map, region_name
):
try:
return _parse_and_create_resource(
logical_id, resource_json, resources_map, region_name, update=True
)
except Exception as e:
LOG.error(
'Unable to parse and update resource "%s": %s %s'
% (logical_id, e, traceback.format_exc())
)
raise
def _parse_and_create_resource(
logical_id,
resource_json,
resources_map,
region_name,
update=False,
force_create=False,
):
stack_name = resources_map.get("AWS::StackName")
resource_hash_key = (stack_name, logical_id)
props = resource_json["Properties"] = resource_json.get("Properties") or {}
# If the current stack is being updated, avoid infinite recursion
updating = CURRENTLY_UPDATING_RESOURCES.get(resource_hash_key)
LOG.debug(
"Currently processing stack resource %s/%s: %s"
% (stack_name, logical_id, updating)
)
if updating:
return None
# parse and get final resource JSON
resource_tuple = parsing.parse_resource(
logical_id, resource_json, resources_map
)
if not resource_tuple:
return None
_, resource_json, _ = resource_tuple
def add_default_props(resource_props):
"""apply some fixes which otherwise cause deployments to fail"""
res_type = resource_props["Type"]
props = resource_props.get("Properties", {})
if res_type == "AWS::Lambda::EventSourceMapping" and not props.get(
"StartingPosition"
):
props["StartingPosition"] = "LATEST"
# generate default names for certain resource types
default_attrs = (
("AWS::IAM::Role", "RoleName"),
("AWS::Events::Rule", "Name"),
)
for entry in default_attrs:
if res_type == entry[0] and not props.get(entry[1]):
props[entry[1]] = "cf-%s-%s" % (
stack_name,
md5(canonical_json(props)),
)
# add some fixes and default props which otherwise cause deployments to fail
add_default_props(resource_json)
for resource in resources_map._resource_json_map.values():
add_default_props(resource)
# check if this resource already exists in the resource map
resource = resources_map._parsed_resources.get(logical_id)
if resource and not update and not force_create:
return resource
# fix resource ARNs, make sure to convert account IDs 000000000000 to 123456789012
resource_json_arns_fixed = clone(json_safe(convert_objs_to_ids(resource_json)))
set_moto_account_ids(resource_json_arns_fixed)
# create resource definition and store CloudFormation metadata in moto
moto_create_error = None
if (resource or update) and not force_create:
parse_and_update_resource_orig(
logical_id, resource_json_arns_fixed, resources_map, region_name
)
elif not resource:
try:
resource = parse_and_create_resource_orig(
logical_id, resource_json_arns_fixed, resources_map, region_name
)
resource.logical_id = logical_id
except Exception as e:
moto_create_error = e
# check whether this resource needs to be deployed
resource_map_new = dict(resources_map._resource_json_map)
resource_map_new[logical_id] = resource_json
should_be_created = template_deployer.should_be_deployed(
logical_id, resource_map_new, stack_name
)
# check for moto creation errors and raise an exception if needed
if moto_create_error:
if should_be_created:
raise moto_create_error
else:
LOG.info(
"Error on moto CF resource creation. Ignoring, as should_be_created=%s: %s"
% (should_be_created, moto_create_error)
)
# Fix for moto which sometimes hard-codes region name as 'us-east-1'
if hasattr(resource, "region_name") and resource.region_name != region_name:
LOG.debug(
"Updating incorrect region from %s to %s"
% (resource.region_name, region_name)
)
resource.region_name = region_name
# check whether this resource needs to be deployed
is_updateable = False
if not should_be_created:
# This resource is either not deployable or already exists. Check if it can be updated
is_updateable = template_deployer.is_updateable(
logical_id, resource_map_new, stack_name
)
if not update or not is_updateable:
all_satisfied = template_deployer.all_resource_dependencies_satisfied(
logical_id, resource_map_new, stack_name
)
if not all_satisfied:
LOG.info(
"Resource %s cannot be deployed, found unsatisfied dependencies. %s"
% (logical_id, resource_json)
)
details = [logical_id, resource_json, resources_map, region_name]
resources_map._unresolved_resources = getattr(
resources_map, "_unresolved_resources", {}
)
resources_map._unresolved_resources[logical_id] = details
else:
LOG.debug(
"Resource %s need not be deployed (is_updateable=%s): %s %s"
% (logical_id, is_updateable, resource_json, bool(resource))
)
# Return if this resource already exists and can/need not be updated yet
# NOTE: We should always return the resource here, to avoid duplicate
# creation of resources in moto!
return resource
# Apply some fixes/patches to the resource names, then deploy resource in LocalStack
update_resource_name(resource, resource_json)
LOG.debug(
"Deploying CloudFormation resource (update=%s, exists=%s, updateable=%s): %s"
% (update, not should_be_created, is_updateable, resource_json)
)
try:
CURRENTLY_UPDATING_RESOURCES[resource_hash_key] = True
deploy_func = (
template_deployer.update_resource
if update
else template_deployer.deploy_resource
)
result = deploy_func(logical_id, resource_map_new, stack_name=stack_name)
finally:
CURRENTLY_UPDATING_RESOURCES[resource_hash_key] = False
if not should_be_created:
# skip the parts below for update requests
return resource
def find_id(resource):
"""Find ID of the given resource."""
if not resource:
return
for id_attr in (
"Id",
"id",
"ResourceId",
"RestApiId",
"DeploymentId",
"RoleId",
):
if id_attr in resource:
return resource[id_attr]
# update resource IDs to avoid mismatch between CF moto and LocalStack backend resources
if hasattr(resource, "id") or (
isinstance(resource, dict) and resource.get("id")
):
existing_id = resource.id if hasattr(resource, "id") else resource["id"]
new_res_id = find_id(result)
LOG.debug(
"Updating resource id: %s - %s, %s - %s"
% (existing_id, new_res_id, resource, resource_json)
)
if new_res_id:
LOG.info(
"Updating resource ID from %s to %s (%s)"
% (existing_id, new_res_id, region_name)
)
update_resource_id(
resource,
new_res_id,
props,
region_name,
stack_name,
resources_map._resource_json_map,
)
else:
LOG.warning(
"Unable to extract id for resource %s: %s" % (logical_id, result)
)
# update physical_resource_id field
update_physical_resource_id(resource)
return resource
def update_resource_id(
resource, new_id, props, region_name, stack_name, resource_map
):
"""Update and fix the ID(s) of the given resource."""
# NOTE: this is a bit of a hack, which is required because
# of the order of events when CloudFormation resources are created.
# When we process a request to create a CF resource that's part of a
# stack, say, an API Gateway Resource, then we (1) create the object
# in memory in moto, which generates a random ID for the resource, and
# (2) create the actual resource in the backend service using
# template_deployer.deploy_resource(..) (see above).
# The resource created in (2) now has a different ID than the resource
# created in (1), which leads to downstream problems. Hence, we need
# the logic below to reconcile the ids, i.e., apply IDs from (2) to (1).
backend = apigw_models.apigateway_backends[region_name]
if isinstance(resource, apigw_models.RestAPI):
backend.apis.pop(resource.id, None)
backend.apis[new_id] = resource
# We also need to fetch the resources to replace the root resource
# that moto automatically adds to newly created RestAPI objects
client = aws_stack.connect_to_service("apigateway")
resources = client.get_resources(restApiId=new_id, limit=500)["items"]
# make sure no resources have been added in addition to the root /
assert len(resource.resources) == 1
resource.resources = {}
for res in resources:
res_path_part = res.get("pathPart") or res.get("path")
child = resource.add_child(res_path_part, res.get("parentId"))
resource.resources.pop(child.id)
child.id = res["id"]
child.api_id = new_id
resource.resources[child.id] = child
resource.id = new_id
elif isinstance(resource, apigw_models.Resource):
api_id = props["RestApiId"]
api_id = template_deployer.resolve_refs_recursively(
stack_name, api_id, resource_map
)
backend.apis[api_id].resources.pop(resource.id, None)
backend.apis[api_id].resources[new_id] = resource
resource.id = new_id
elif isinstance(resource, apigw_models.Deployment):
api_id = props["RestApiId"]
api_id = template_deployer.resolve_refs_recursively(
stack_name, api_id, resource_map
)
backend.apis[api_id].deployments.pop(resource["id"], None)
backend.apis[api_id].deployments[new_id] = resource
resource["id"] = new_id
else:
LOG.warning(
"Unexpected resource type when updating ID: %s" % type(resource)
)
parse_and_create_resource_orig = parsing.parse_and_create_resource
parsing.parse_and_create_resource = parse_and_create_resource
parse_and_update_resource_orig = parsing.parse_and_update_resource
parsing.parse_and_update_resource = parse_and_update_resource
# patch CloudFormation parse_output(..) method to fix a bug in moto
def parse_output(output_logical_id, output_json, resources_map):
try:
result = parse_output_orig(output_logical_id, output_json, resources_map)
except KeyError:
result = Output()
result.key = output_logical_id
result.value = None
result.description = output_json.get("Description")
# Make sure output includes export name
if not hasattr(result, "export_name"):
result.export_name = output_json.get("Export", {}).get("Name")
return result
parse_output_orig = parsing.parse_output
parsing.parse_output = parse_output
# Make sure the export name is returned for stack outputs
if "<ExportName>" not in responses.DESCRIBE_STACKS_TEMPLATE:
find = "</OutputValue>"
replace = """</OutputValue>
{% if output.export_name %}
<ExportName>{{ output.export_name }}</ExportName>
{% endif %}
"""
responses.DESCRIBE_STACKS_TEMPLATE = responses.DESCRIBE_STACKS_TEMPLATE.replace(
find, replace
)
# Patch CloudFormationBackend.update_stack method in moto
def make_cf_update_stack(cf_backend):
cf_update_stack_orig = cf_backend.update_stack
def cf_update_stack(self, *args, **kwargs):
stack = cf_update_stack_orig(*args, **kwargs)
# update stack exports
self._validate_export_uniqueness(stack)
for export in stack.exports:
self.exports[export.name] = export
return stack
return types.MethodType(cf_update_stack, cf_backend)
for region, cf_backend in cloudformation_backends.items():
cf_backend.update_stack = make_cf_update_stack(cf_backend)
# Patch DynamoDB get_cfn_attribute(..) method in moto
def DynamoDB_Table_get_cfn_attribute(self, attribute_name):
try:
return ddb_table_get_cfn_attribute_orig(self, attribute_name)
except Exception:
if attribute_name == "Arn":
return aws_stack.dynamodb_table_arn(table_name=self.name)
raise
ddb_table_get_cfn_attribute_orig = dynamodb_models.Table.get_cfn_attribute
dynamodb_models.Table.get_cfn_attribute = DynamoDB_Table_get_cfn_attribute
# Patch generate_stack_id(..) method in moto
def generate_stack_id(stack_name, region=None, **kwargs):
region = region or aws_stack.get_region()
return generate_stack_id_orig(stack_name, region=region, **kwargs)
generate_stack_id_orig = cloudformation_utils.generate_stack_id
cloudformation_utils.generate_stack_id = cloudformation_models.generate_stack_id = (
generate_stack_id
)
# Patch DynamoDB get_cfn_attribute(..) method in moto
def DynamoDB2_Table_get_cfn_attribute(self, attribute_name):
if attribute_name == "Arn":
return aws_stack.dynamodb_table_arn(table_name=self.name)
elif attribute_name == "StreamArn":
if (self.stream_specification or {}).get("StreamEnabled"):
return aws_stack.dynamodb_stream_arn(self.name, "latest")
return None
raise UnformattedGetAttTemplateException()
dynamodb2_models.Table.get_cfn_attribute = DynamoDB2_Table_get_cfn_attribute
# Patch SQS get_cfn_attribute(..) method in moto
def SQS_Queue_get_cfn_attribute(self, attribute_name):
if attribute_name in ["Arn", "QueueArn"]:
return aws_stack.sqs_queue_arn(queue_name=self.name)
return SQS_Queue_get_cfn_attribute_orig(self, attribute_name)
SQS_Queue_get_cfn_attribute_orig = sqs_models.Queue.get_cfn_attribute
sqs_models.Queue.get_cfn_attribute = SQS_Queue_get_cfn_attribute
# Patch S3 Bucket get_cfn_attribute(..) method in moto
def S3_Bucket_get_cfn_attribute(self, attribute_name):
if attribute_name in ["Arn"]:
return aws_stack.s3_bucket_arn(self.name)
return S3_Bucket_get_cfn_attribute_orig(self, attribute_name)
S3_Bucket_get_cfn_attribute_orig = s3_models.FakeBucket.get_cfn_attribute
s3_models.FakeBucket.get_cfn_attribute = S3_Bucket_get_cfn_attribute
# Patch SQS physical_resource_id(..) method in moto
@property
def SQS_Queue_physical_resource_id(self):
result = SQS_Queue_physical_resource_id_orig.fget(self)
if "://" not in result:
# convert ID to queue URL
return aws_stack.get_sqs_queue_url(result)
return result
SQS_Queue_physical_resource_id_orig = sqs_models.Queue.physical_resource_id
sqs_models.Queue.physical_resource_id = SQS_Queue_physical_resource_id
# Patch LogGroup get_cfn_attribute(..) method in moto
def LogGroup_get_cfn_attribute(self, attribute_name):
try:
return LogGroup_get_cfn_attribute_orig(self, attribute_name)
except Exception:
if attribute_name == "Arn":
return aws_stack.log_group_arn(self.name)
raise
LogGroup_get_cfn_attribute_orig = getattr(
cw_models.LogGroup, "get_cfn_attribute", None
)
cw_models.LogGroup.get_cfn_attribute = LogGroup_get_cfn_attribute
# Patch Lambda get_cfn_attribute(..) method in moto
def Lambda_Function_get_cfn_attribute(self, attribute_name):
try:
if attribute_name == "Arn":
return self.function_arn
return Lambda_Function_get_cfn_attribute_orig(self, attribute_name)
except Exception:
if attribute_name in ("Name", "FunctionName"):
return self.function_name
raise
Lambda_Function_get_cfn_attribute_orig = (
lambda_models.LambdaFunction.get_cfn_attribute
)
lambda_models.LambdaFunction.get_cfn_attribute = Lambda_Function_get_cfn_attribute
# Patch DynamoDB get_cfn_attribute(..) method in moto
def DynamoDB_Table_get_cfn_attribute(self, attribute_name):
try:
if attribute_name == "StreamArn":
streams = aws_stack.connect_to_service("dynamodbstreams").list_streams(
TableName=self.name
)["Streams"]
return streams[0]["StreamArn"] if streams else None
return DynamoDB_Table_get_cfn_attribute_orig(self, attribute_name)
except Exception as e:
LOG.warning(
'Unable to get attribute "%s" from resource %s: %s'
% (attribute_name, type(self), e)
)
raise
DynamoDB_Table_get_cfn_attribute_orig = dynamodb_models.Table.get_cfn_attribute
dynamodb_models.Table.get_cfn_attribute = DynamoDB_Table_get_cfn_attribute
# Patch IAM get_cfn_attribute(..) method in moto
def IAM_Role_get_cfn_attribute(self, attribute_name):
try:
return IAM_Role_get_cfn_attribute_orig(self, attribute_name)
except Exception:
if attribute_name == "Arn":
return aws_stack.role_arn(self.name)
raise
IAM_Role_get_cfn_attribute_orig = iam_models.Role.get_cfn_attribute
iam_models.Role.get_cfn_attribute = IAM_Role_get_cfn_attribute
# Patch IAM Role model
# https://github.com/localstack/localstack/issues/925
@property
def IAM_Role_physical_resource_id(self):
return self.name
iam_models.Role.physical_resource_id = IAM_Role_physical_resource_id
# Patch SNS Topic get_cfn_attribute(..) method in moto
def SNS_Topic_get_cfn_attribute(self, attribute_name):
result = SNS_Topic_get_cfn_attribute_orig(self, attribute_name)
if attribute_name.lower() in ["arn", "topicarn"]:
result = aws_stack.fix_account_id_in_arns(result)
return result
SNS_Topic_get_cfn_attribute_orig = sns_models.Topic.get_cfn_attribute
sns_models.Topic.get_cfn_attribute = SNS_Topic_get_cfn_attribute
# Patch ES get_cfn_attribute(..) method
def ES_get_cfn_attribute(self, attribute_name):
if attribute_name in ["Arn", "DomainArn"]:
return aws_stack.es_domain_arn(self.params.get("DomainName"))
if attribute_name == "DomainEndpoint":
if not hasattr(self, "_domain_endpoint"):
es_details = aws_stack.connect_to_service(
"es"
).describe_elasticsearch_domain(
DomainName=self.params.get("DomainName")
)
self._domain_endpoint = es_details["DomainStatus"]["Endpoint"]
return self._domain_endpoint
raise UnformattedGetAttTemplateException()
service_models.ElasticsearchDomain.get_cfn_attribute = ES_get_cfn_attribute
# Patch Firehose get_cfn_attribute(..) method
def Firehose_get_cfn_attribute(self, attribute_name):
if attribute_name == "Arn":
return aws_stack.firehose_stream_arn(self.params.get("DeliveryStreamName"))
raise UnformattedGetAttTemplateException()
service_models.FirehoseDeliveryStream.get_cfn_attribute = Firehose_get_cfn_attribute
# Patch LambdaFunction create_from_cloudformation_json(..) method in moto
@classmethod
def Lambda_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
resource_name = (
cloudformation_json.get("Properties", {}).get("FunctionName")
or resource_name
)
return Lambda_create_from_cloudformation_json_orig(
resource_name, cloudformation_json, region_name
)
Lambda_create_from_cloudformation_json_orig = (
lambda_models.LambdaFunction.create_from_cloudformation_json
)
lambda_models.LambdaFunction.create_from_cloudformation_json = (
Lambda_create_from_cloudformation_json
)
# Patch EventSourceMapping create_from_cloudformation_json(..) method in moto
@classmethod
def Mapping_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json.get("Properties", {})
func_name = props.get("FunctionName") or ""
if ":lambda:" in func_name:
props["FunctionName"] = aws_stack.lambda_function_name(func_name)
return Mapping_create_from_cloudformation_json_orig(
resource_name, cloudformation_json, region_name
)
Mapping_create_from_cloudformation_json_orig = (
lambda_models.EventSourceMapping.create_from_cloudformation_json
)
lambda_models.EventSourceMapping.create_from_cloudformation_json = (
Mapping_create_from_cloudformation_json
)
# Patch LambdaFunction update_from_cloudformation_json(..) method in moto
@classmethod
def Lambda_update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
resource_name = (
cloudformation_json.get("Properties", {}).get("FunctionName")
or new_resource_name
)
return Lambda_create_from_cloudformation_json_orig(
resource_name, cloudformation_json, region_name
)
if not hasattr(lambda_models.LambdaFunction, "update_from_cloudformation_json"):
lambda_models.LambdaFunction.update_from_cloudformation_json = (
Lambda_update_from_cloudformation_json
)
# Patch Role update_from_cloudformation_json(..) method
@classmethod
def Role_update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
props = cloudformation_json.get("Properties", {})
original_resource.name = props.get("RoleName") or original_resource.name
original_resource.assume_role_policy_document = props.get(
"AssumeRolePolicyDocument"
)
return original_resource
if not hasattr(iam_models.Role, "update_from_cloudformation_json"):
iam_models.Role.update_from_cloudformation_json = (
Role_update_from_cloudformation_json
)
# patch ApiGateway Deployment
def depl_delete_from_cloudformation_json(resource_name, resource_json, region_name):
properties = resource_json["Properties"]
LOG.info(
"TODO: apigateway.Deployment.delete_from_cloudformation_json %s"
% properties
)
if not hasattr(apigw_models.Deployment, "delete_from_cloudformation_json"):
apigw_models.Deployment.delete_from_cloudformation_json = (
depl_delete_from_cloudformation_json
)
# patch Lambda Version
def vers_delete_from_cloudformation_json(resource_name, resource_json, region_name):
properties = resource_json["Properties"]
LOG.info(
"TODO: apigateway.Deployment.delete_from_cloudformation_json %s"
% properties
)
if not hasattr(lambda_models.LambdaVersion, "delete_from_cloudformation_json"):
lambda_models.LambdaVersion.delete_from_cloudformation_json = (
vers_delete_from_cloudformation_json
)
# add CloudFormation types
@classmethod
def RestAPI_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
name = props["Name"]
region_name = props.get("Region") or aws_stack.get_region()
description = props.get("Description") or ""
id = props.get("Id") or short_uid()
return apigw_models.RestAPI(id, region_name, name, description)
def RestAPI_get_cfn_attribute(self, attribute_name):
if attribute_name == "Id":
return self.id
if attribute_name == "Region":
return self.region_name
if attribute_name == "Name":
return self.name
if attribute_name == "Description":
return self.description
if attribute_name == "RootResourceId":
for id, resource in self.resources.items():
if resource.parent_id is None:
return resource.id
return None
raise UnformattedGetAttTemplateException()
@classmethod
def Deployment_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
name = props["StageName"]
deployment_id = props.get("Id") or short_uid()
description = props.get("Description") or ""
return apigw_models.Deployment(deployment_id, name, description)
@classmethod
def Resource_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
region_name = props.get("Region") or aws_stack.get_region()
path_part = props.get("PathPart")
api_id = props.get("RestApiId")
parent_id = props.get("ParentId")
id = props.get("Id") or short_uid()
return apigw_models.Resource(id, region_name, api_id, path_part, parent_id)
@classmethod
def Method_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
method_type = props.get("HttpMethod")
authorization_type = props.get("AuthorizationType")
return apigw_models.Method(method_type, authorization_type)
apigw_models.RestAPI.create_from_cloudformation_json = (
RestAPI_create_from_cloudformation_json
)
apigw_models.RestAPI.get_cfn_attribute = RestAPI_get_cfn_attribute
apigw_models.Deployment.create_from_cloudformation_json = (
Deployment_create_from_cloudformation_json
)
apigw_models.Resource.create_from_cloudformation_json = (
Resource_create_from_cloudformation_json
)
apigw_models.Method.create_from_cloudformation_json = (
Method_create_from_cloudformation_json
)
# TODO: add support for AWS::ApiGateway::Model, AWS::ApiGateway::RequestValidator, ...
# fix AttributeError in moto's CloudFormation describe_stack_resource
def describe_stack_resource(self):
stack_name = self._get_param("StackName")
stack = self.cloudformation_backend.get_stack(stack_name)
logical_resource_id = self._get_param("LogicalResourceId")
if not stack:
msg = 'Unable to find CloudFormation stack "%s" in region %s' % (
stack_name,
aws_stack.get_region(),
)
if aws_stack.get_region() != self.region:
msg = "%s/%s" % (msg, self.region)
LOG.warning(msg)
response = aws_responses.flask_error_response(
msg, code=404, error_type="ResourceNotFoundException"
)
return 404, response.headers, response.data
for stack_resource in stack.stack_resources:
# Note: Line below has been patched
# if stack_resource.logical_resource_id == logical_resource_id:
if (
stack_resource
and stack_resource.logical_resource_id == logical_resource_id
):
resource = stack_resource
break
else:
raise ValidationError(logical_resource_id)
template = self.response_template(
responses.DESCRIBE_STACK_RESOURCE_RESPONSE_TEMPLATE
)
return template.render(stack=stack, resource=resource)
responses.CloudFormationResponse.describe_stack_resource = describe_stack_resource
# fix moto's describe_stack_events jinja2.exceptions.UndefinedError
def cf_describe_stack_events(self):
stack_name = self._get_param("StackName")
backend = self.cloudformation_backend
stack = backend.get_stack(stack_name)
if not stack:
# Also return stack events for deleted stacks, specified by stack name
stack = (
[
stk
for id, stk in backend.deleted_stacks.items()
if stk.name == stack_name
]
or [0]
)[0]
if not stack:
raise ValidationError(
stack_name,
message='Unable to find stack "%s" in region %s'
% (stack_name, aws_stack.get_region()),
)
template = self.response_template(responses.DESCRIBE_STACK_EVENTS_RESPONSE)
return template.render(stack=stack)
responses.CloudFormationResponse.describe_stack_events = cf_describe_stack_events
# fix Lambda regions in moto - see https://github.com/localstack/localstack/issues/1961
for region in boto3.session.Session().get_available_regions("lambda"):
if region not in lambda_models.lambda_backends:
lambda_models.lambda_backends[region] = lambda_models.LambdaBackend(region)
# patch FakeStack.initialize_resources
def run_dependencies_deployment_loop(stack, action):
def set_status(status):
stack._add_stack_event(status)
stack.status = status
def run_loop(*args):
# NOTE: We're adding this additional loop, as it seems that in some cases moto
# does not consider resource dependencies (e.g., if a "DependsOn" resource property
# is defined). This loop allows us to incrementally resolve such dependencies.
resource_map = stack.resource_map
unresolved = {}
for i in range(MAX_DEPENDENCY_DEPTH):
LOG.debug(
"Running CloudFormation stack deployment loop iteration %s"
% (i + 1)
)
unresolved = getattr(resource_map, "_unresolved_resources", {})
if not unresolved:
set_status("%s_COMPLETE" % action)
return resource_map
resource_map._unresolved_resources = {}
for resource_id, resource_details in unresolved.items():
# Re-trigger the resource creation
parse_and_create_resource(*resource_details, force_create=True)
if unresolved.keys() == resource_map._unresolved_resources.keys():
# looks like no more resources can be resolved -> bail
LOG.warning(
"Unresolvable dependencies, there may be undeployed stack resources: %s"
% unresolved
)
break
set_status("%s_FAILED" % action)
raise Exception(
"Unable to resolve all CloudFormation resources after traversing "
+ "dependency tree (maximum depth %s reached): %s"
% (MAX_DEPENDENCY_DEPTH, list(unresolved.keys()))
)
# NOTE: We're running the loop in the background, as it might take some time to complete
FuncThread(run_loop).start()
def initialize_resources(self):
self.resource_map._template = self.resource_map._template or self.template_dict
self.resource_map.load()
self.resource_map.create(self.template_dict)
self.output_map.create()
run_dependencies_deployment_loop(self, "CREATE")
def update(self, *args, **kwargs):
stack_update_orig(self, *args, **kwargs)
run_dependencies_deployment_loop(self, "UPDATE")
FakeStack.initialize_resources = initialize_resources
stack_update_orig = FakeStack.update
FakeStack.update = update
# patch Kinesis Stream get_cfn_attribute(..) method in moto
def Kinesis_Stream_get_cfn_attribute(self, attribute_name):
if attribute_name == "Arn":
return self.arn
raise UnformattedGetAttTemplateException()
kinesis_models.Stream.get_cfn_attribute = Kinesis_Stream_get_cfn_attribute
# patch cloudformation backend create_change_set(..)
# #760 cloudformation deploy invalid xml error
cloudformation_backend_create_change_set_orig = (
CloudFormationBackend.create_change_set
)
def cloudformation_backend_create_change_set(
self,
stack_name,
change_set_name,
template,
parameters,
region_name,
change_set_type,
notification_arns=None,
tags=None,
role_arn=None,
):
change_set_id, _ = cloudformation_backend_create_change_set_orig(
self,
stack_name,
change_set_name,
template,
parameters,
region_name,
change_set_type,
notification_arns,
tags,
role_arn,
)
change_set = self.change_sets[change_set_id]
change_set.status = "CREATE_COMPLETE"
return change_set_id, _
CloudFormationBackend.create_change_set = cloudformation_backend_create_change_set
|
https://github.com/localstack/localstack/issues/2083
|
localstack_1 | 2020-02-24T14:57:32:ERROR:localstack.services.cloudformation.cloudformation_starter: Unable to parse and create resource "MyTestSubscription": Parameter validation failed:
localstack_1 | Missing required parameter in input: "TopicArn" Traceback (most recent call last):
localstack_1 | File "/opt/code/localstack/localstack/services/cloudformation/cloudformation_starter.py", line 185, in parse_and_create_resource
localstack_1 | return _parse_and_create_resource(logical_id, resource_json,
localstack_1 | File "/opt/code/localstack/localstack/services/cloudformation/cloudformation_starter.py", line 287, in _parse_and_create_resource
localstack_1 | result = deploy_func(logical_id, resource_map_new, stack_name=stack_name)
localstack_1 | File "/opt/code/localstack/localstack/utils/cloudformation/template_deployer.py", line 823, in deploy_resource
localstack_1 | return execute_resource_action(resource_id, resources, stack_name, ACTION_CREATE)
localstack_1 | File "/opt/code/localstack/localstack/utils/cloudformation/template_deployer.py", line 849, in execute_resource_action
localstack_1 | result = configure_resource_via_sdk(resource_id, resources, resource_type, func, stack_name)
localstack_1 | File "/opt/code/localstack/localstack/utils/cloudformation/template_deployer.py", line 922, in configure_resource_via_sdk
localstack_1 | raise e
localstack_1 | File "/opt/code/localstack/localstack/utils/cloudformation/template_deployer.py", line 919, in configure_resource_via_sdk
localstack_1 | result = function(**params)
localstack_1 | File "/opt/code/localstack/.venv/lib/python3.8/site-packages/botocore/client.py", line 316, in _api_call
localstack_1 | return self._make_api_call(operation_name, kwargs)
localstack_1 | File "/opt/code/localstack/.venv/lib/python3.8/site-packages/botocore/client.py", line 598, in _make_api_call
localstack_1 | request_dict = self._convert_to_request_dict(
localstack_1 | File "/opt/code/localstack/.venv/lib/python3.8/site-packages/botocore/client.py", line 646, in _convert_to_request_dict
localstack_1 | request_dict = self._serializer.serialize_to_request(
localstack_1 | File "/opt/code/localstack/.venv/lib/python3.8/site-packages/botocore/validate.py", line 297, in serialize_to_request
localstack_1 | raise ParamValidationError(report=report.generate_report())
localstack_1 | botocore.exceptions.ParamValidationError: Parameter validation failed:
localstack_1 | Missing required parameter in input: "TopicArn"
|
ParamValidationError
|
def _parse_and_create_resource(
logical_id,
resource_json,
resources_map,
region_name,
update=False,
force_create=False,
):
stack_name = resources_map.get("AWS::StackName")
resource_hash_key = (stack_name, logical_id)
props = resource_json["Properties"] = resource_json.get("Properties") or {}
# If the current stack is being updated, avoid infinite recursion
updating = CURRENTLY_UPDATING_RESOURCES.get(resource_hash_key)
LOG.debug(
"Currently processing stack resource %s/%s: %s"
% (stack_name, logical_id, updating)
)
if updating:
return None
# parse and get final resource JSON
resource_tuple = parsing.parse_resource(logical_id, resource_json, resources_map)
if not resource_tuple:
return None
_, resource_json, _ = resource_tuple
def add_default_props(resource_props):
"""apply some fixes which otherwise cause deployments to fail"""
res_type = resource_props["Type"]
props = resource_props.get("Properties", {})
if res_type == "AWS::Lambda::EventSourceMapping" and not props.get(
"StartingPosition"
):
props["StartingPosition"] = "LATEST"
# generate default names for certain resource types
default_attrs = (("AWS::IAM::Role", "RoleName"), ("AWS::Events::Rule", "Name"))
for entry in default_attrs:
if res_type == entry[0] and not props.get(entry[1]):
props[entry[1]] = "cf-%s-%s" % (stack_name, md5(canonical_json(props)))
# add some fixes and default props which otherwise cause deployments to fail
add_default_props(resource_json)
for resource in resources_map._resource_json_map.values():
add_default_props(resource)
# check if this resource already exists in the resource map
resource = resources_map._parsed_resources.get(logical_id)
if resource and not update and not force_create:
return resource
# fix resource ARNs, make sure to convert account IDs 000000000000 to 123456789012
resource_json_arns_fixed = clone(json_safe(convert_objs_to_ids(resource_json)))
set_moto_account_ids(resource_json_arns_fixed)
# create resource definition and store CloudFormation metadata in moto
moto_create_error = None
if (resource or update) and not force_create:
parse_and_update_resource_orig(
logical_id, resource_json_arns_fixed, resources_map, region_name
)
elif not resource:
try:
resource = parse_and_create_resource_orig(
logical_id, resource_json_arns_fixed, resources_map, region_name
)
if not resource:
# this can happen if the resource has an associated Condition which evaluates to false
return resource
resource.logical_id = logical_id
except Exception as e:
moto_create_error = e
# check whether this resource needs to be deployed
resource_map_new = dict(resources_map._resource_json_map)
resource_map_new[logical_id] = resource_json
should_be_created = template_deployer.should_be_deployed(
logical_id, resource_map_new, stack_name
)
# check for moto creation errors and raise an exception if needed
if moto_create_error:
if should_be_created:
raise moto_create_error
else:
LOG.info(
"Error on moto CF resource creation. Ignoring, as should_be_created=%s: %s"
% (should_be_created, moto_create_error)
)
# Fix for moto which sometimes hard-codes region name as 'us-east-1'
if hasattr(resource, "region_name") and resource.region_name != region_name:
LOG.debug(
"Updating incorrect region from %s to %s"
% (resource.region_name, region_name)
)
resource.region_name = region_name
# check whether this resource needs to be deployed
is_updateable = False
if not should_be_created:
# This resource is either not deployable or already exists. Check if it can be updated
is_updateable = template_deployer.is_updateable(
logical_id, resource_map_new, stack_name
)
if not update or not is_updateable:
all_satisfied = template_deployer.all_resource_dependencies_satisfied(
logical_id, resource_map_new, stack_name
)
if not all_satisfied:
LOG.info(
"Resource %s cannot be deployed, found unsatisfied dependencies. %s"
% (logical_id, resource_json)
)
details = [logical_id, resource_json, resources_map, region_name]
resources_map._unresolved_resources = getattr(
resources_map, "_unresolved_resources", {}
)
resources_map._unresolved_resources[logical_id] = details
else:
LOG.debug(
"Resource %s need not be deployed (is_updateable=%s): %s %s"
% (logical_id, is_updateable, resource_json, bool(resource))
)
# Return if this resource already exists and can/need not be updated yet
# NOTE: We should always return the resource here, to avoid duplicate
# creation of resources in moto!
return resource
# Apply some fixes/patches to the resource names, then deploy resource in LocalStack
update_resource_name(resource, resource_json)
LOG.debug(
"Deploying CloudFormation resource (update=%s, exists=%s, updateable=%s): %s"
% (update, not should_be_created, is_updateable, resource_json)
)
try:
CURRENTLY_UPDATING_RESOURCES[resource_hash_key] = True
deploy_func = (
template_deployer.update_resource
if update
else template_deployer.deploy_resource
)
result = deploy_func(logical_id, resource_map_new, stack_name=stack_name)
finally:
CURRENTLY_UPDATING_RESOURCES[resource_hash_key] = False
if not should_be_created:
# skip the parts below for update requests
return resource
def find_id(resource):
"""Find ID of the given resource."""
if not resource:
return
for id_attr in (
"Id",
"id",
"ResourceId",
"RestApiId",
"DeploymentId",
"RoleId",
):
if id_attr in resource:
return resource[id_attr]
# update resource IDs to avoid mismatch between CF moto and LocalStack backend resources
if hasattr(resource, "id") or (isinstance(resource, dict) and resource.get("id")):
existing_id = resource.id if hasattr(resource, "id") else resource["id"]
new_res_id = find_id(result)
LOG.debug(
"Updating resource id: %s - %s, %s - %s"
% (existing_id, new_res_id, resource, resource_json)
)
if new_res_id:
LOG.info(
"Updating resource ID from %s to %s (%s)"
% (existing_id, new_res_id, region_name)
)
update_resource_id(
resource,
new_res_id,
props,
region_name,
stack_name,
resources_map._resource_json_map,
)
else:
LOG.warning(
"Unable to extract id for resource %s: %s" % (logical_id, result)
)
# update physical_resource_id field
update_physical_resource_id(resource)
return resource
|
def _parse_and_create_resource(
logical_id,
resource_json,
resources_map,
region_name,
update=False,
force_create=False,
):
stack_name = resources_map.get("AWS::StackName")
resource_hash_key = (stack_name, logical_id)
props = resource_json["Properties"] = resource_json.get("Properties") or {}
# If the current stack is being updated, avoid infinite recursion
updating = CURRENTLY_UPDATING_RESOURCES.get(resource_hash_key)
LOG.debug(
"Currently processing stack resource %s/%s: %s"
% (stack_name, logical_id, updating)
)
if updating:
return None
# parse and get final resource JSON
resource_tuple = parsing.parse_resource(logical_id, resource_json, resources_map)
if not resource_tuple:
return None
_, resource_json, _ = resource_tuple
def add_default_props(resource_props):
"""apply some fixes which otherwise cause deployments to fail"""
res_type = resource_props["Type"]
props = resource_props.get("Properties", {})
if res_type == "AWS::Lambda::EventSourceMapping" and not props.get(
"StartingPosition"
):
props["StartingPosition"] = "LATEST"
# generate default names for certain resource types
default_attrs = (("AWS::IAM::Role", "RoleName"), ("AWS::Events::Rule", "Name"))
for entry in default_attrs:
if res_type == entry[0] and not props.get(entry[1]):
props[entry[1]] = "cf-%s-%s" % (stack_name, md5(canonical_json(props)))
# add some fixes and default props which otherwise cause deployments to fail
add_default_props(resource_json)
for resource in resources_map._resource_json_map.values():
add_default_props(resource)
# check if this resource already exists in the resource map
resource = resources_map._parsed_resources.get(logical_id)
if resource and not update and not force_create:
return resource
# fix resource ARNs, make sure to convert account IDs 000000000000 to 123456789012
resource_json_arns_fixed = clone(json_safe(convert_objs_to_ids(resource_json)))
set_moto_account_ids(resource_json_arns_fixed)
# create resource definition and store CloudFormation metadata in moto
moto_create_error = None
if (resource or update) and not force_create:
parse_and_update_resource_orig(
logical_id, resource_json_arns_fixed, resources_map, region_name
)
elif not resource:
try:
resource = parse_and_create_resource_orig(
logical_id, resource_json_arns_fixed, resources_map, region_name
)
resource.logical_id = logical_id
except Exception as e:
moto_create_error = e
# check whether this resource needs to be deployed
resource_map_new = dict(resources_map._resource_json_map)
resource_map_new[logical_id] = resource_json
should_be_created = template_deployer.should_be_deployed(
logical_id, resource_map_new, stack_name
)
# check for moto creation errors and raise an exception if needed
if moto_create_error:
if should_be_created:
raise moto_create_error
else:
LOG.info(
"Error on moto CF resource creation. Ignoring, as should_be_created=%s: %s"
% (should_be_created, moto_create_error)
)
# Fix for moto which sometimes hard-codes region name as 'us-east-1'
if hasattr(resource, "region_name") and resource.region_name != region_name:
LOG.debug(
"Updating incorrect region from %s to %s"
% (resource.region_name, region_name)
)
resource.region_name = region_name
# check whether this resource needs to be deployed
is_updateable = False
if not should_be_created:
# This resource is either not deployable or already exists. Check if it can be updated
is_updateable = template_deployer.is_updateable(
logical_id, resource_map_new, stack_name
)
if not update or not is_updateable:
all_satisfied = template_deployer.all_resource_dependencies_satisfied(
logical_id, resource_map_new, stack_name
)
if not all_satisfied:
LOG.info(
"Resource %s cannot be deployed, found unsatisfied dependencies. %s"
% (logical_id, resource_json)
)
details = [logical_id, resource_json, resources_map, region_name]
resources_map._unresolved_resources = getattr(
resources_map, "_unresolved_resources", {}
)
resources_map._unresolved_resources[logical_id] = details
else:
LOG.debug(
"Resource %s need not be deployed (is_updateable=%s): %s %s"
% (logical_id, is_updateable, resource_json, bool(resource))
)
# Return if this resource already exists and can/need not be updated yet
# NOTE: We should always return the resource here, to avoid duplicate
# creation of resources in moto!
return resource
# Apply some fixes/patches to the resource names, then deploy resource in LocalStack
update_resource_name(resource, resource_json)
LOG.debug(
"Deploying CloudFormation resource (update=%s, exists=%s, updateable=%s): %s"
% (update, not should_be_created, is_updateable, resource_json)
)
try:
CURRENTLY_UPDATING_RESOURCES[resource_hash_key] = True
deploy_func = (
template_deployer.update_resource
if update
else template_deployer.deploy_resource
)
result = deploy_func(logical_id, resource_map_new, stack_name=stack_name)
finally:
CURRENTLY_UPDATING_RESOURCES[resource_hash_key] = False
if not should_be_created:
# skip the parts below for update requests
return resource
def find_id(resource):
"""Find ID of the given resource."""
if not resource:
return
for id_attr in (
"Id",
"id",
"ResourceId",
"RestApiId",
"DeploymentId",
"RoleId",
):
if id_attr in resource:
return resource[id_attr]
# update resource IDs to avoid mismatch between CF moto and LocalStack backend resources
if hasattr(resource, "id") or (isinstance(resource, dict) and resource.get("id")):
existing_id = resource.id if hasattr(resource, "id") else resource["id"]
new_res_id = find_id(result)
LOG.debug(
"Updating resource id: %s - %s, %s - %s"
% (existing_id, new_res_id, resource, resource_json)
)
if new_res_id:
LOG.info(
"Updating resource ID from %s to %s (%s)"
% (existing_id, new_res_id, region_name)
)
update_resource_id(
resource,
new_res_id,
props,
region_name,
stack_name,
resources_map._resource_json_map,
)
else:
LOG.warning(
"Unable to extract id for resource %s: %s" % (logical_id, result)
)
# update physical_resource_id field
update_physical_resource_id(resource)
return resource
|
https://github.com/localstack/localstack/issues/2083
|
localstack_1 | 2020-02-24T14:57:32:ERROR:localstack.services.cloudformation.cloudformation_starter: Unable to parse and create resource "MyTestSubscription": Parameter validation failed:
localstack_1 | Missing required parameter in input: "TopicArn" Traceback (most recent call last):
localstack_1 | File "/opt/code/localstack/localstack/services/cloudformation/cloudformation_starter.py", line 185, in parse_and_create_resource
localstack_1 | return _parse_and_create_resource(logical_id, resource_json,
localstack_1 | File "/opt/code/localstack/localstack/services/cloudformation/cloudformation_starter.py", line 287, in _parse_and_create_resource
localstack_1 | result = deploy_func(logical_id, resource_map_new, stack_name=stack_name)
localstack_1 | File "/opt/code/localstack/localstack/utils/cloudformation/template_deployer.py", line 823, in deploy_resource
localstack_1 | return execute_resource_action(resource_id, resources, stack_name, ACTION_CREATE)
localstack_1 | File "/opt/code/localstack/localstack/utils/cloudformation/template_deployer.py", line 849, in execute_resource_action
localstack_1 | result = configure_resource_via_sdk(resource_id, resources, resource_type, func, stack_name)
localstack_1 | File "/opt/code/localstack/localstack/utils/cloudformation/template_deployer.py", line 922, in configure_resource_via_sdk
localstack_1 | raise e
localstack_1 | File "/opt/code/localstack/localstack/utils/cloudformation/template_deployer.py", line 919, in configure_resource_via_sdk
localstack_1 | result = function(**params)
localstack_1 | File "/opt/code/localstack/.venv/lib/python3.8/site-packages/botocore/client.py", line 316, in _api_call
localstack_1 | return self._make_api_call(operation_name, kwargs)
localstack_1 | File "/opt/code/localstack/.venv/lib/python3.8/site-packages/botocore/client.py", line 598, in _make_api_call
localstack_1 | request_dict = self._convert_to_request_dict(
localstack_1 | File "/opt/code/localstack/.venv/lib/python3.8/site-packages/botocore/client.py", line 646, in _convert_to_request_dict
localstack_1 | request_dict = self._serializer.serialize_to_request(
localstack_1 | File "/opt/code/localstack/.venv/lib/python3.8/site-packages/botocore/validate.py", line 297, in serialize_to_request
localstack_1 | raise ParamValidationError(report=report.generate_report())
localstack_1 | botocore.exceptions.ParamValidationError: Parameter validation failed:
localstack_1 | Missing required parameter in input: "TopicArn"
|
ParamValidationError
|
def apply_patches():
s3_models.DEFAULT_KEY_BUFFER_SIZE = S3_MAX_FILE_SIZE_MB * 1024 * 1024
def init(
self,
name,
value,
storage="STANDARD",
etag=None,
is_versioned=False,
version_id=0,
max_buffer_size=None,
*args,
**kwargs,
):
return original_init(
self,
name,
value,
storage=storage,
etag=etag,
is_versioned=is_versioned,
version_id=version_id,
max_buffer_size=s3_models.DEFAULT_KEY_BUFFER_SIZE,
*args,
**kwargs,
)
original_init = s3_models.FakeKey.__init__
s3_models.FakeKey.__init__ = init
def s3_update_acls(self, request, query, bucket_name, key_name):
# fix for - https://github.com/localstack/localstack/issues/1733
# - https://github.com/localstack/localstack/issues/1170
acl_key = "acl|%s|%s" % (bucket_name, key_name)
acl = self._acl_from_headers(request.headers)
if acl:
TMP_STATE[acl_key] = acl
if not query.get("uploadId"):
return
bucket = self.backend.get_bucket(bucket_name)
key = bucket and self.backend.get_key(bucket_name, key_name)
if not key:
return
acl = acl or TMP_STATE.pop(acl_key, None) or bucket.acl
if acl:
key.set_acl(acl)
# patch Bucket.create_from_cloudformation_json in moto
@classmethod
def Bucket_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
result = create_from_cloudformation_json_orig(
resource_name, cloudformation_json, region_name
)
# remove the bucket from the backend, as our template_deployer will take care of creating the resource
resource_name = s3_listener.normalize_bucket_name(resource_name)
s3_models.s3_backend.buckets.pop(resource_name)
return result
create_from_cloudformation_json_orig = (
s3_models.FakeBucket.create_from_cloudformation_json
)
s3_models.FakeBucket.create_from_cloudformation_json = (
Bucket_create_from_cloudformation_json
)
# patch S3Bucket.create_bucket(..)
def create_bucket(self, bucket_name, region_name, *args, **kwargs):
bucket_name = s3_listener.normalize_bucket_name(bucket_name)
return create_bucket_orig(bucket_name, region_name, *args, **kwargs)
create_bucket_orig = s3_models.s3_backend.create_bucket
s3_models.s3_backend.create_bucket = types.MethodType(
create_bucket, s3_models.s3_backend
)
# patch S3Bucket.get_bucket(..)
def get_bucket(self, bucket_name, *args, **kwargs):
bucket_name = s3_listener.normalize_bucket_name(bucket_name)
if bucket_name == BUCKET_MARKER_LOCAL:
return None
return get_bucket_orig(bucket_name, *args, **kwargs)
get_bucket_orig = s3_models.s3_backend.get_bucket
s3_models.s3_backend.get_bucket = types.MethodType(get_bucket, s3_models.s3_backend)
# patch S3Bucket.get_bucket(..)
def delete_bucket(self, bucket_name, *args, **kwargs):
bucket_name = s3_listener.normalize_bucket_name(bucket_name)
return delete_bucket_orig(bucket_name, *args, **kwargs)
delete_bucket_orig = s3_models.s3_backend.delete_bucket
s3_models.s3_backend.delete_bucket = types.MethodType(
delete_bucket, s3_models.s3_backend
)
# patch _key_response_post(..)
def s3_key_response_post(
self, request, body, bucket_name, query, key_name, *args, **kwargs
):
result = s3_key_response_post_orig(
request, body, bucket_name, query, key_name, *args, **kwargs
)
s3_update_acls(self, request, query, bucket_name, key_name)
return result
s3_key_response_post_orig = s3_responses.S3ResponseInstance._key_response_post
s3_responses.S3ResponseInstance._key_response_post = types.MethodType(
s3_key_response_post, s3_responses.S3ResponseInstance
)
# patch _key_response_put(..)
def s3_key_response_put(
self, request, body, bucket_name, query, key_name, headers, *args, **kwargs
):
result = s3_key_response_put_orig(
request, body, bucket_name, query, key_name, headers, *args, **kwargs
)
s3_update_acls(self, request, query, bucket_name, key_name)
return result
s3_key_response_put_orig = s3_responses.S3ResponseInstance._key_response_put
s3_responses.S3ResponseInstance._key_response_put = types.MethodType(
s3_key_response_put, s3_responses.S3ResponseInstance
)
# patch DeleteObjectTagging
def s3_key_response_delete(self, bucket_name, query, key_name, *args, **kwargs):
# Fixes https://github.com/localstack/localstack/issues/1083
if query.get("tagging"):
self._set_action("KEY", "DELETE", query)
self._authenticate_and_authorize_s3_action()
key = self.backend.get_key(bucket_name, key_name)
key.tags = s3_models.FakeTagging()
return 204, {}, ""
result = s3_key_response_delete_orig(
bucket_name, query, key_name, *args, **kwargs
)
return result
s3_key_response_delete_orig = s3_responses.S3ResponseInstance._key_response_delete
s3_responses.S3ResponseInstance._key_response_delete = types.MethodType(
s3_key_response_delete, s3_responses.S3ResponseInstance
)
s3_responses.ACTION_MAP["KEY"]["DELETE"]["tagging"] = "DeleteObjectTagging"
# patch max-keys
def s3_truncate_result(self, result_keys, max_keys):
return s3_truncate_result_orig(result_keys, max_keys or 1000)
s3_truncate_result_orig = s3_responses.S3ResponseInstance._truncate_result
s3_responses.S3ResponseInstance._truncate_result = types.MethodType(
s3_truncate_result, s3_responses.S3ResponseInstance
)
# patch _bucket_response_delete_keys(..)
# https://github.com/localstack/localstack/issues/2077
s3_delete_keys_response_template = """<?xml version="1.0" encoding="UTF-8"?>
<DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
{% for k in deleted %}
<Deleted>
<Key>{{k.key}}</Key>
<VersionId>{{k.version_id}}</VersionId>
</Deleted>
{% endfor %}
{% for k in delete_errors %}
<Error>
<Key>{{k}}</Key>
</Error>
{% endfor %}
</DeleteResult>"""
def s3_bucket_response_delete_keys(self, request, body, bucket_name):
template = self.response_template(s3_delete_keys_response_template)
elements = minidom.parseString(body).getElementsByTagName("Object")
if len(elements) == 0:
raise MalformedXML()
deleted_names = []
error_names = []
keys = []
for element in elements:
if len(element.getElementsByTagName("VersionId")) == 0:
version_id = None
else:
version_id = element.getElementsByTagName("VersionId")[
0
].firstChild.nodeValue
keys.append(
{
"key_name": element.getElementsByTagName("Key")[
0
].firstChild.nodeValue,
"version_id": version_id,
}
)
for k in keys:
key_name = k["key_name"]
version_id = k["version_id"]
success = self.backend.delete_key(
bucket_name, undo_clean_key_name(key_name), version_id
)
if success:
deleted_names.append({"key": key_name, "version_id": version_id})
else:
error_names.append(key_name)
return (
200,
{},
template.render(deleted=deleted_names, delete_errors=error_names),
)
s3_responses.S3ResponseInstance._bucket_response_delete_keys = types.MethodType(
s3_bucket_response_delete_keys, s3_responses.S3ResponseInstance
)
# Patch _handle_range_header(..)
# https://github.com/localstack/localstack/issues/2146
s3_response_handle_range_header_orig = (
s3_responses.S3ResponseInstance._handle_range_header
)
def s3_response_handle_range_header(self, request, headers, response_content):
rs_code, rs_headers, rs_content = s3_response_handle_range_header_orig(
request, headers, response_content
)
if rs_code == 206:
for k in ["ETag", "last-modified"]:
v = headers.get(k)
if v and not rs_headers.get(k):
rs_headers[k] = v
return rs_code, rs_headers, rs_content
s3_responses.S3ResponseInstance._handle_range_header = types.MethodType(
s3_response_handle_range_header, s3_responses.S3ResponseInstance
)
|
def apply_patches():
s3_models.DEFAULT_KEY_BUFFER_SIZE = S3_MAX_FILE_SIZE_MB * 1024 * 1024
def init(
self,
name,
value,
storage="STANDARD",
etag=None,
is_versioned=False,
version_id=0,
max_buffer_size=None,
*args,
**kwargs,
):
return original_init(
self,
name,
value,
storage=storage,
etag=etag,
is_versioned=is_versioned,
version_id=version_id,
max_buffer_size=s3_models.DEFAULT_KEY_BUFFER_SIZE,
*args,
**kwargs,
)
original_init = s3_models.FakeKey.__init__
s3_models.FakeKey.__init__ = init
def s3_update_acls(self, request, query, bucket_name, key_name):
# fix for - https://github.com/localstack/localstack/issues/1733
# - https://github.com/localstack/localstack/issues/1170
acl_key = "acl|%s|%s" % (bucket_name, key_name)
acl = self._acl_from_headers(request.headers)
if acl:
TMP_STATE[acl_key] = acl
if not query.get("uploadId"):
return
bucket = self.backend.get_bucket(bucket_name)
key = bucket and self.backend.get_key(bucket_name, key_name)
if not key:
return
acl = acl or TMP_STATE.pop(acl_key, None) or bucket.acl
if acl:
key.set_acl(acl)
# patch Bucket.create_from_cloudformation_json in moto
@classmethod
def Bucket_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
result = create_from_cloudformation_json_orig(
resource_name, cloudformation_json, region_name
)
# remove the bucket from the backend, as our template_deployer will take care of creating the resource
resource_name = s3_listener.normalize_bucket_name(resource_name)
s3_models.s3_backend.buckets.pop(resource_name)
return result
create_from_cloudformation_json_orig = (
s3_models.FakeBucket.create_from_cloudformation_json
)
s3_models.FakeBucket.create_from_cloudformation_json = (
Bucket_create_from_cloudformation_json
)
# patch S3Bucket.create_bucket(..)
def create_bucket(self, bucket_name, region_name, *args, **kwargs):
bucket_name = s3_listener.normalize_bucket_name(bucket_name)
return create_bucket_orig(bucket_name, region_name, *args, **kwargs)
create_bucket_orig = s3_models.s3_backend.create_bucket
s3_models.s3_backend.create_bucket = types.MethodType(
create_bucket, s3_models.s3_backend
)
# patch S3Bucket.get_bucket(..)
def get_bucket(self, bucket_name, *args, **kwargs):
bucket_name = s3_listener.normalize_bucket_name(bucket_name)
return get_bucket_orig(bucket_name, *args, **kwargs)
get_bucket_orig = s3_models.s3_backend.get_bucket
s3_models.s3_backend.get_bucket = types.MethodType(get_bucket, s3_models.s3_backend)
# patch S3Bucket.get_bucket(..)
def delete_bucket(self, bucket_name, *args, **kwargs):
bucket_name = s3_listener.normalize_bucket_name(bucket_name)
return delete_bucket_orig(bucket_name, *args, **kwargs)
delete_bucket_orig = s3_models.s3_backend.delete_bucket
s3_models.s3_backend.delete_bucket = types.MethodType(
delete_bucket, s3_models.s3_backend
)
# patch _key_response_post(..)
def s3_key_response_post(
self, request, body, bucket_name, query, key_name, *args, **kwargs
):
result = s3_key_response_post_orig(
request, body, bucket_name, query, key_name, *args, **kwargs
)
s3_update_acls(self, request, query, bucket_name, key_name)
return result
s3_key_response_post_orig = s3_responses.S3ResponseInstance._key_response_post
s3_responses.S3ResponseInstance._key_response_post = types.MethodType(
s3_key_response_post, s3_responses.S3ResponseInstance
)
# patch _key_response_put(..)
def s3_key_response_put(
self, request, body, bucket_name, query, key_name, headers, *args, **kwargs
):
result = s3_key_response_put_orig(
request, body, bucket_name, query, key_name, headers, *args, **kwargs
)
s3_update_acls(self, request, query, bucket_name, key_name)
return result
s3_key_response_put_orig = s3_responses.S3ResponseInstance._key_response_put
s3_responses.S3ResponseInstance._key_response_put = types.MethodType(
s3_key_response_put, s3_responses.S3ResponseInstance
)
# patch DeleteObjectTagging
def s3_key_response_delete(self, bucket_name, query, key_name, *args, **kwargs):
# Fixes https://github.com/localstack/localstack/issues/1083
if query.get("tagging"):
self._set_action("KEY", "DELETE", query)
self._authenticate_and_authorize_s3_action()
key = self.backend.get_key(bucket_name, key_name)
key.tags = s3_models.FakeTagging()
return 204, {}, ""
result = s3_key_response_delete_orig(
bucket_name, query, key_name, *args, **kwargs
)
return result
s3_key_response_delete_orig = s3_responses.S3ResponseInstance._key_response_delete
s3_responses.S3ResponseInstance._key_response_delete = types.MethodType(
s3_key_response_delete, s3_responses.S3ResponseInstance
)
s3_responses.ACTION_MAP["KEY"]["DELETE"]["tagging"] = "DeleteObjectTagging"
# patch max-keys
def s3_truncate_result(self, result_keys, max_keys):
return s3_truncate_result_orig(result_keys, max_keys or 1000)
s3_truncate_result_orig = s3_responses.S3ResponseInstance._truncate_result
s3_responses.S3ResponseInstance._truncate_result = types.MethodType(
s3_truncate_result, s3_responses.S3ResponseInstance
)
# patch _bucket_response_delete_keys(..)
# https://github.com/localstack/localstack/issues/2077
s3_delete_keys_response_template = """<?xml version="1.0" encoding="UTF-8"?>
<DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
{% for k in deleted %}
<Deleted>
<Key>{{k.key}}</Key>
<VersionId>{{k.version_id}}</VersionId>
</Deleted>
{% endfor %}
{% for k in delete_errors %}
<Error>
<Key>{{k}}</Key>
</Error>
{% endfor %}
</DeleteResult>"""
def s3_bucket_response_delete_keys(self, request, body, bucket_name):
template = self.response_template(s3_delete_keys_response_template)
elements = minidom.parseString(body).getElementsByTagName("Object")
if len(elements) == 0:
raise MalformedXML()
deleted_names = []
error_names = []
keys = []
for element in elements:
if len(element.getElementsByTagName("VersionId")) == 0:
version_id = None
else:
version_id = element.getElementsByTagName("VersionId")[
0
].firstChild.nodeValue
keys.append(
{
"key_name": element.getElementsByTagName("Key")[
0
].firstChild.nodeValue,
"version_id": version_id,
}
)
for k in keys:
key_name = k["key_name"]
version_id = k["version_id"]
success = self.backend.delete_key(
bucket_name, undo_clean_key_name(key_name), version_id
)
if success:
deleted_names.append({"key": key_name, "version_id": version_id})
else:
error_names.append(key_name)
return (
200,
{},
template.render(deleted=deleted_names, delete_errors=error_names),
)
s3_responses.S3ResponseInstance._bucket_response_delete_keys = types.MethodType(
s3_bucket_response_delete_keys, s3_responses.S3ResponseInstance
)
# Patch _handle_range_header(..)
# https://github.com/localstack/localstack/issues/2146
s3_response_handle_range_header_orig = (
s3_responses.S3ResponseInstance._handle_range_header
)
def s3_response_handle_range_header(self, request, headers, response_content):
rs_code, rs_headers, rs_content = s3_response_handle_range_header_orig(
request, headers, response_content
)
if rs_code == 206:
for k in ["ETag", "last-modified"]:
v = headers.get(k)
if v and not rs_headers.get(k):
rs_headers[k] = v
return rs_code, rs_headers, rs_content
s3_responses.S3ResponseInstance._handle_range_header = types.MethodType(
s3_response_handle_range_header, s3_responses.S3ResponseInstance
)
|
https://github.com/localstack/localstack/issues/2164
|
localstack_main | 2020-03-17T14:10:42:warning:moto: No Moto CloudFormation support for AWS::S3::BucketPolicy
localstack_main | 2020-03-17T14:10:47:warning:localstack.services.awslambda.lambda_api: Function not found: arn:aws:lambda:us-east-1:000000000000:function:sample1-local-downloadFile
localstack_main | 2020-03-17T14:10:47:warning:localstack.services.cloudformation.cloudformation_starter: Unable to extract id for resource IamRoleLambdaExecution: {'Role': {'Path': '/', 'RoleName': 'sample1-local-us-east-1-lambdaRole', 'RoleId': '4uzole7bkp69kfgntgrq', 'Arn': 'arn:aws:iam::000000000000:role/sample1-local-us-east-1-lambdaRole', 'CreateDate': datetime.datetime(2020, 3, 17, 14, 10, 47, 927000, tzinfo=tzlocal()), 'AssumeRolePolicyDocument': {'Version': '2012-10-17', 'Statement': [{'Effect': 'Allow', 'Principal': {'Service': ['lambda.amazonaws.com']}, 'Action': ['sts:AssumeRole']}]}, 'MaxSessionDuration': 3600}, 'ResponseMetadata': {'RequestId': '4a93ceee-9966-11e1-b624-b1aEXAMPLE7c', 'HTTPStatusCode': 200, 'HTTPHeaders': {'server': 'BaseHTTP/0.6 Python/3.8.2', 'date': 'Tue, 17 Mar 2020 14:10:47 GMT', 'content-type': 'text/html; charset=utf-8', 'content-length': '709', 'access-control-allow-origin': '*', 'access-control-allow-methods': 'HEAD,GET,PUT,POST,DELETE,OPTIONS,PATCH', 'access-control-allow-headers': 'authorization,content-type,content-md5,cache-control,x-amz-content-sha256,x-amz-date,x-amz-security-token,x-amz-user-agent,x-amz-target,x-amz-acl,x-amz-version-id,x-localstack-target,x-amz-tagging', 'access-control-expose-headers': 'x-amz-version-id'}, 'RetryAttempts': 0}}
localstack_main | 2020-03-17T14:10:47:warning:localstack.services.awslambda.lambda_api: Function not found: arn:aws:lambda:us-east-1:000000000000:function:sample1-local-downloadFile
localstack_main | 2020-03-17T14:10:48:ERROR:localstack.services.cloudformation.cloudformation_starter: Unable to parse and create resource "DownloadFileLambdaFunction": An error occurred (InvalidParameterValueException) when calling the None operation: Error occurred while GetObject. S3 Error Code: NoSuchBucket. S3 Error Message: The specified bucket does not exist Traceback (most recent call last):
localstack_main | File "/opt/code/localstack/.venv/lib/python3.8/site-packages/moto/s3/models.py", line 1190, in get_bucket
localstack_main | return self.buckets[bucket_name]
localstack_main | KeyError: '__local__'
localstack_main |
localstack_main | During handling of the above exception, another exception occurred:
localstack_main |
localstack_main | Traceback (most recent call last):
localstack_main | File "/opt/code/localstack/.venv/lib/python3.8/site-packages/moto/awslambda/models.py", line 219, in __init__
localstack_main | key = s3_backend.get_key(self.code["S3Bucket"], self.code["S3Key"])
localstack_main | File "/opt/code/localstack/.venv/lib/python3.8/site-packages/moto/s3/models.py", line 1323, in get_key
localstack_main | bucket = self.get_bucket(bucket_name)
localstack_main | File "/opt/code/localstack/localstack/services/s3/s3_starter.py", line 103, in get_bucket
localstack_main | return get_bucket_orig(bucket_name, *args, **kwargs)
localstack_main | File "/opt/code/localstack/.venv/lib/python3.8/site-packages/moto/s3/models.py", line 1192, in get_bucket
localstack_main | raise MissingBucket(bucket=bucket_name)
localstack_main | moto.s3.exceptions.MissingBucket: 404 Not Found: <?xml version="1.0" encoding="UTF-8"?>
localstack_main | <Error>
localstack_main | <Code>NoSuchBucket</Code>
localstack_main | <Message>The specified bucket does not exist</Message>
localstack_main | <BucketName>__local__</BucketName>
localstack_main | <RequestID>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestID>
localstack_main | </Error>
localstack_main |
localstack_main | During handling of the above exception, another exception occurred:
localstack_main |
localstack_main | Traceback (most recent call last):
localstack_main | File "/opt/code/localstack/localstack/services/cloudformation/cloudformation_starter.py", line 174, in parse_and_create_resource
localstack_main | return _parse_and_create_resource(logical_id, resource_json,
localstack_main | File "/opt/code/localstack/localstack/services/cloudformation/cloudformation_starter.py", line 242, in _parse_and_create_resource
localstack_main | raise moto_create_error
localstack_main | File "/opt/code/localstack/localstack/services/cloudformation/cloudformation_starter.py", line 228, in _parse_and_create_resource
localstack_main | resource = parse_and_create_resource_orig(logical_id,
localstack_main | File "/opt/code/localstack/.venv/lib/python3.8/site-packages/moto/cloudformation/parsing.py", line 338, in parse_and_create_resource
localstack_main | resource = resource_class.create_from_cloudformation_json(
localstack_main | File "/opt/code/localstack/localstack/services/cloudformation/cloudformation_starter.py", line 549, in Lambda_create_from_cloudformation_json
localstack_main | return Lambda_create_from_cloudformation_json_orig(resource_name, cloudformation_json, region_name)
localstack_main | File "/opt/code/localstack/.venv/lib/python3.8/site-packages/moto/awslambda/models.py", line 516, in create_from_cloudformation_json
localstack_main | fn = backend.create_function(spec)
localstack_main | File "/opt/code/localstack/.venv/lib/python3.8/site-packages/moto/awslambda/models.py", line 822, in create_function
localstack_main | fn = LambdaFunction(spec, self.region_name, version="$LATEST")
localstack_main | File "/opt/code/localstack/.venv/lib/python3.8/site-packages/moto/awslambda/models.py", line 222, in __init__
localstack_main | raise InvalidParameterValueException(
localstack_main | moto.awslambda.exceptions.InvalidParameterValueException: An error occurred (InvalidParameterValueException) when calling the None operation: Error occurred while GetObject. S3 Error Code: NoSuchBucket. S3 Error Message: The specified bucket does not exist
|
KeyError
|
def get_bucket(self, bucket_name, *args, **kwargs):
bucket_name = s3_listener.normalize_bucket_name(bucket_name)
if bucket_name == BUCKET_MARKER_LOCAL:
return None
return get_bucket_orig(bucket_name, *args, **kwargs)
|
def get_bucket(self, bucket_name, *args, **kwargs):
bucket_name = s3_listener.normalize_bucket_name(bucket_name)
return get_bucket_orig(bucket_name, *args, **kwargs)
|
https://github.com/localstack/localstack/issues/2164
|
localstack_main | 2020-03-17T14:10:42:warning:moto: No Moto CloudFormation support for AWS::S3::BucketPolicy
localstack_main | 2020-03-17T14:10:47:warning:localstack.services.awslambda.lambda_api: Function not found: arn:aws:lambda:us-east-1:000000000000:function:sample1-local-downloadFile
localstack_main | 2020-03-17T14:10:47:warning:localstack.services.cloudformation.cloudformation_starter: Unable to extract id for resource IamRoleLambdaExecution: {'Role': {'Path': '/', 'RoleName': 'sample1-local-us-east-1-lambdaRole', 'RoleId': '4uzole7bkp69kfgntgrq', 'Arn': 'arn:aws:iam::000000000000:role/sample1-local-us-east-1-lambdaRole', 'CreateDate': datetime.datetime(2020, 3, 17, 14, 10, 47, 927000, tzinfo=tzlocal()), 'AssumeRolePolicyDocument': {'Version': '2012-10-17', 'Statement': [{'Effect': 'Allow', 'Principal': {'Service': ['lambda.amazonaws.com']}, 'Action': ['sts:AssumeRole']}]}, 'MaxSessionDuration': 3600}, 'ResponseMetadata': {'RequestId': '4a93ceee-9966-11e1-b624-b1aEXAMPLE7c', 'HTTPStatusCode': 200, 'HTTPHeaders': {'server': 'BaseHTTP/0.6 Python/3.8.2', 'date': 'Tue, 17 Mar 2020 14:10:47 GMT', 'content-type': 'text/html; charset=utf-8', 'content-length': '709', 'access-control-allow-origin': '*', 'access-control-allow-methods': 'HEAD,GET,PUT,POST,DELETE,OPTIONS,PATCH', 'access-control-allow-headers': 'authorization,content-type,content-md5,cache-control,x-amz-content-sha256,x-amz-date,x-amz-security-token,x-amz-user-agent,x-amz-target,x-amz-acl,x-amz-version-id,x-localstack-target,x-amz-tagging', 'access-control-expose-headers': 'x-amz-version-id'}, 'RetryAttempts': 0}}
localstack_main | 2020-03-17T14:10:47:warning:localstack.services.awslambda.lambda_api: Function not found: arn:aws:lambda:us-east-1:000000000000:function:sample1-local-downloadFile
localstack_main | 2020-03-17T14:10:48:ERROR:localstack.services.cloudformation.cloudformation_starter: Unable to parse and create resource "DownloadFileLambdaFunction": An error occurred (InvalidParameterValueException) when calling the None operation: Error occurred while GetObject. S3 Error Code: NoSuchBucket. S3 Error Message: The specified bucket does not exist Traceback (most recent call last):
localstack_main | File "/opt/code/localstack/.venv/lib/python3.8/site-packages/moto/s3/models.py", line 1190, in get_bucket
localstack_main | return self.buckets[bucket_name]
localstack_main | KeyError: '__local__'
localstack_main |
localstack_main | During handling of the above exception, another exception occurred:
localstack_main |
localstack_main | Traceback (most recent call last):
localstack_main | File "/opt/code/localstack/.venv/lib/python3.8/site-packages/moto/awslambda/models.py", line 219, in __init__
localstack_main | key = s3_backend.get_key(self.code["S3Bucket"], self.code["S3Key"])
localstack_main | File "/opt/code/localstack/.venv/lib/python3.8/site-packages/moto/s3/models.py", line 1323, in get_key
localstack_main | bucket = self.get_bucket(bucket_name)
localstack_main | File "/opt/code/localstack/localstack/services/s3/s3_starter.py", line 103, in get_bucket
localstack_main | return get_bucket_orig(bucket_name, *args, **kwargs)
localstack_main | File "/opt/code/localstack/.venv/lib/python3.8/site-packages/moto/s3/models.py", line 1192, in get_bucket
localstack_main | raise MissingBucket(bucket=bucket_name)
localstack_main | moto.s3.exceptions.MissingBucket: 404 Not Found: <?xml version="1.0" encoding="UTF-8"?>
localstack_main | <Error>
localstack_main | <Code>NoSuchBucket</Code>
localstack_main | <Message>The specified bucket does not exist</Message>
localstack_main | <BucketName>__local__</BucketName>
localstack_main | <RequestID>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestID>
localstack_main | </Error>
localstack_main |
localstack_main | During handling of the above exception, another exception occurred:
localstack_main |
localstack_main | Traceback (most recent call last):
localstack_main | File "/opt/code/localstack/localstack/services/cloudformation/cloudformation_starter.py", line 174, in parse_and_create_resource
localstack_main | return _parse_and_create_resource(logical_id, resource_json,
localstack_main | File "/opt/code/localstack/localstack/services/cloudformation/cloudformation_starter.py", line 242, in _parse_and_create_resource
localstack_main | raise moto_create_error
localstack_main | File "/opt/code/localstack/localstack/services/cloudformation/cloudformation_starter.py", line 228, in _parse_and_create_resource
localstack_main | resource = parse_and_create_resource_orig(logical_id,
localstack_main | File "/opt/code/localstack/.venv/lib/python3.8/site-packages/moto/cloudformation/parsing.py", line 338, in parse_and_create_resource
localstack_main | resource = resource_class.create_from_cloudformation_json(
localstack_main | File "/opt/code/localstack/localstack/services/cloudformation/cloudformation_starter.py", line 549, in Lambda_create_from_cloudformation_json
localstack_main | return Lambda_create_from_cloudformation_json_orig(resource_name, cloudformation_json, region_name)
localstack_main | File "/opt/code/localstack/.venv/lib/python3.8/site-packages/moto/awslambda/models.py", line 516, in create_from_cloudformation_json
localstack_main | fn = backend.create_function(spec)
localstack_main | File "/opt/code/localstack/.venv/lib/python3.8/site-packages/moto/awslambda/models.py", line 822, in create_function
localstack_main | fn = LambdaFunction(spec, self.region_name, version="$LATEST")
localstack_main | File "/opt/code/localstack/.venv/lib/python3.8/site-packages/moto/awslambda/models.py", line 222, in __init__
localstack_main | raise InvalidParameterValueException(
localstack_main | moto.awslambda.exceptions.InvalidParameterValueException: An error occurred (InvalidParameterValueException) when calling the None operation: Error occurred while GetObject. S3 Error Code: NoSuchBucket. S3 Error Message: The specified bucket does not exist
|
KeyError
|
def do_run(cmd, asynchronous, print_output=None, env_vars={}):
sys.stdout.flush()
if asynchronous:
if is_debug() and print_output is None:
print_output = True
outfile = subprocess.PIPE if print_output else None
t = ShellCommandThread(cmd, outfile=outfile, env_vars=env_vars)
t.start()
TMP_THREADS.append(t)
return t
return run(cmd, env_vars=env_vars)
|
def do_run(cmd, asynchronous, print_output=False, env_vars={}):
sys.stdout.flush()
if asynchronous:
if is_debug():
print_output = True
outfile = subprocess.PIPE if print_output else None
t = ShellCommandThread(cmd, outfile=outfile, env_vars=env_vars)
t.start()
TMP_THREADS.append(t)
return t
return run(cmd, env_vars=env_vars)
|
https://github.com/localstack/localstack/issues/1723
|
2019-11-05T14:38:26:WARNING:localstack.utils.cloudformation.template_deployer: Error calling <bound method ClientCreator._create_api_method.<locals>._api_call of <botocore.client.SQS object at 0x7f3d9ab15a58>> with params: {'QueueName': 'A-DLQ', 'Attributes': {'MessageRetentionPeriod': '1209600'}, 'tags': [{'Key': 'Name', 'Value': 'A-DLQ'}, {'Key': 'Cluster', 'Value': 'A'}]} for resource: {'Type': 'AWS::SQS::Queue', 'Properties': {'QueueName': 'A-DLQ', 'MessageRetentionPeriod': 1209600, 'Tags': [{'Key': 'Name', 'Value': 'A-DLQ'}, {'Key': 'Cluster', 'Value': 'AEvents'}]}}
localstack_1 | 2019-11-05T14:38:26:ERROR:localstack.services.cloudformation.cloudformation_starter: Unable to parse and create resource "DLQ": Parameter validation failed:
localstack_1 | Invalid type for parameter tags, value: [{'Key': 'Name', 'Value': 'A-DLQ'}, {'Key': 'Cluster', 'Value': 'A'}], type: <class 'list'>, valid types: <class 'dict'> Traceback (most recent call last):
localstack_1 | File "/opt/code/localstack/localstack/services/cloudformation/cloudformation_starter.py", line 149, in parse_and_create_resource
localstack_1 | return _parse_and_create_resource(logical_id, resource_json, resources_map, region_name)
localstack_1 | File "/opt/code/localstack/localstack/services/cloudformation/cloudformation_starter.py", line 208, in _parse_and_create_resource
localstack_1 | result = deploy_func(logical_id, resource_wrapped, stack_name=stack_name)
localstack_1 | File "/opt/code/localstack/localstack/utils/cloudformation/template_deployer.py", line 647, in deploy_resource
localstack_1 | raise e
localstack_1 | File "/opt/code/localstack/localstack/utils/cloudformation/template_deployer.py", line 644, in deploy_resource
localstack_1 | result = function(**params)
localstack_1 | File "/opt/code/localstack/.venv/lib/python3.6/site-packages/botocore/client.py", line 357, in _api_call
localstack_1 | return self._make_api_call(operation_name, kwargs)
localstack_1 | File "/opt/code/localstack/.venv/lib/python3.6/site-packages/botocore/client.py", line 634, in _make_api_call
localstack_1 | api_params, operation_model, context=request_context)
localstack_1 | File "/opt/code/localstack/.venv/lib/python3.6/site-packages/botocore/client.py", line 682, in _convert_to_request_dict
localstack_1 | api_params, operation_model)
localstack_1 | File "/opt/code/localstack/.venv/lib/python3.6/site-packages/botocore/validate.py", line 297, in serialize_to_request
localstack_1 | raise ParamValidationError(report=report.generate_report())
localstack_1 | botocore.exceptions.ParamValidationError: Parameter validation failed:
localstack_1 | Invalid type for parameter tags, value: [{'Key': 'Name', 'Value': 'A-DLQ'}, {'Key': 'Cluster', 'Value': 'AEvents'}], type: <class 'list'>, valid types: <class 'dict'>
localstack_1 |
|
ParamValidationError
|
def apply_patches():
"""Apply patches to make LocalStack seamlessly interact with the moto backend.
TODO: Eventually, these patches should be contributed to the upstream repo!"""
# add model mappings to moto
parsing.MODEL_MAP.update(MODEL_MAP)
# Patch S3Backend.get_key method in moto to use S3 API from LocalStack
def get_key(self, bucket_name, key_name, version_id=None):
s3_client = aws_stack.connect_to_service("s3")
value = b""
if bucket_name != BUCKET_MARKER_LOCAL:
value = s3_client.get_object(Bucket=bucket_name, Key=key_name)[
"Body"
].read()
return s3_models.FakeKey(name=key_name, value=value)
s3_models.S3Backend.get_key = get_key
# Patch clean_json in moto
def clean_json(resource_json, resources_map):
result = clean_json_orig(resource_json, resources_map)
if isinstance(result, BaseModel):
if isinstance(resource_json, dict) and "Ref" in resource_json:
entity_id = get_entity_id(result, resource_json)
if entity_id:
return entity_id
LOG.warning(
'Unable to resolve "Ref" attribute for: %s - %s - %s',
resource_json,
result,
type(result),
)
return result
clean_json_orig = parsing.clean_json
parsing.clean_json = clean_json
# Patch parse_and_create_resource method in moto to deploy resources in LocalStack
def parse_and_create_resource(
logical_id, resource_json, resources_map, region_name, force_create=False
):
try:
return _parse_and_create_resource(
logical_id,
resource_json,
resources_map,
region_name,
force_create=force_create,
)
except Exception as e:
LOG.error(
'Unable to parse and create resource "%s": %s %s'
% (logical_id, e, traceback.format_exc())
)
raise
def parse_and_update_resource(
logical_id, resource_json, resources_map, region_name
):
try:
return _parse_and_create_resource(
logical_id, resource_json, resources_map, region_name, update=True
)
except Exception as e:
LOG.error(
'Unable to parse and update resource "%s": %s %s'
% (logical_id, e, traceback.format_exc())
)
raise
def _parse_and_create_resource(
logical_id,
resource_json,
resources_map,
region_name,
update=False,
force_create=False,
):
stack_name = resources_map.get("AWS::StackName")
resource_hash_key = (stack_name, logical_id)
# If the current stack is being updated, avoid infinite recursion
updating = CURRENTLY_UPDATING_RESOURCES.get(resource_hash_key)
LOG.debug(
"Currently processing stack resource %s/%s: %s"
% (stack_name, logical_id, updating)
)
if updating:
return None
# parse and get final resource JSON
resource_tuple = parsing.parse_resource(
logical_id, resource_json, resources_map
)
if not resource_tuple:
return None
_, resource_json, _ = resource_tuple
# add some missing default props which otherwise cause deployments to fail
props = resource_json["Properties"] = resource_json.get("Properties") or {}
if resource_json["Type"] == "AWS::Lambda::EventSourceMapping" and not props.get(
"StartingPosition"
):
props["StartingPosition"] = "LATEST"
# check if this resource already exists in the resource map
resource = resources_map._parsed_resources.get(logical_id)
if resource and not update and not force_create:
return resource
# check whether this resource needs to be deployed
resource_map_new = dict(resources_map._resource_json_map)
resource_map_new[logical_id] = resource_json
should_be_created = template_deployer.should_be_deployed(
logical_id, resource_map_new, stack_name
)
# fix resource ARNs, make sure to convert account IDs 000000000000 to 123456789012
resource_json_arns_fixed = clone(json_safe(convert_objs_to_ids(resource_json)))
set_moto_account_ids(resource_json_arns_fixed)
# create resource definition and store CloudFormation metadata in moto
if (resource or update) and not force_create:
parse_and_update_resource_orig(
logical_id, resource_json_arns_fixed, resources_map, region_name
)
elif not resource:
try:
resource = parse_and_create_resource_orig(
logical_id, resource_json_arns_fixed, resources_map, region_name
)
except Exception as e:
if should_be_created:
raise
else:
LOG.info(
"Error on moto CF resource creation. Ignoring, as should_be_created=%s: %s"
% (should_be_created, e)
)
# Fix for moto which sometimes hard-codes region name as 'us-east-1'
if hasattr(resource, "region_name") and resource.region_name != region_name:
LOG.debug(
"Updating incorrect region from %s to %s"
% (resource.region_name, region_name)
)
resource.region_name = region_name
# check whether this resource needs to be deployed
is_updateable = False
if not should_be_created:
# This resource is either not deployable or already exists. Check if it can be updated
is_updateable = template_deployer.is_updateable(
logical_id, resource_map_new, stack_name
)
if not update or not is_updateable:
all_satisfied = template_deployer.all_resource_dependencies_satisfied(
logical_id, resource_map_new, stack_name
)
if not all_satisfied:
LOG.info(
"Resource %s cannot be deployed, found unsatisfied dependencies. %s"
% (logical_id, resource_json)
)
details = [logical_id, resource_json, resources_map, region_name]
resources_map._unresolved_resources = getattr(
resources_map, "_unresolved_resources", {}
)
resources_map._unresolved_resources[logical_id] = details
else:
LOG.debug(
"Resource %s need not be deployed (is_updateable=%s): %s %s"
% (logical_id, is_updateable, resource_json, bool(resource))
)
# Return if this resource already exists and can/need not be updated yet
# NOTE: We should always return the resource here, to avoid duplicate
# creation of resources in moto!
return resource
# Apply some fixes/patches to the resource names, then deploy resource in LocalStack
update_resource_name(resource, resource_json)
LOG.debug(
"Deploying CloudFormation resource (update=%s, exists=%s, updateable=%s): %s"
% (update, not should_be_created, is_updateable, resource_json)
)
try:
CURRENTLY_UPDATING_RESOURCES[resource_hash_key] = True
deploy_func = (
template_deployer.update_resource
if update
else template_deployer.deploy_resource
)
result = deploy_func(logical_id, resource_map_new, stack_name=stack_name)
finally:
CURRENTLY_UPDATING_RESOURCES[resource_hash_key] = False
if not should_be_created:
# skip the parts below for update requests
return resource
def find_id(resource):
"""Find ID of the given resource."""
if not resource:
return
for id_attr in (
"Id",
"id",
"ResourceId",
"RestApiId",
"DeploymentId",
"RoleId",
):
if id_attr in resource:
return resource[id_attr]
# update resource IDs to avoid mismatch between CF moto and LocalStack backend resources
if hasattr(resource, "id") or (
isinstance(resource, dict) and resource.get("id")
):
existing_id = resource.id if hasattr(resource, "id") else resource["id"]
new_res_id = find_id(result)
LOG.debug(
"Updating resource id: %s - %s, %s - %s"
% (existing_id, new_res_id, resource, resource_json)
)
if new_res_id:
LOG.info(
"Updating resource ID from %s to %s (%s)"
% (existing_id, new_res_id, region_name)
)
update_resource_id(resource, new_res_id, props, region_name)
else:
LOG.warning(
"Unable to extract id for resource %s: %s" % (logical_id, result)
)
# update physical_resource_id field
update_physical_resource_id(resource)
return resource
def update_resource_name(resource, resource_json):
"""Some resources require minor fixes in their CF resource definition
before we can pass them on to deployment."""
props = resource_json["Properties"] = resource_json.get("Properties") or {}
if isinstance(resource, sfn_models.StateMachine) and not props.get(
"StateMachineName"
):
props["StateMachineName"] = resource.name
def update_resource_id(resource, new_id, props, region_name):
"""Update and fix the ID(s) of the given resource."""
# NOTE: this is a bit of a hack, which is required because
# of the order of events when CloudFormation resources are created.
# When we process a request to create a CF resource that's part of a
# stack, say, an API Gateway Resource, then we (1) create the object
# in memory in moto, which generates a random ID for the resource, and
# (2) create the actual resource in the backend service using
# template_deployer.deploy_resource(..) (see above).
# The resource created in (2) now has a different ID than the resource
# created in (1), which leads to downstream problems. Hence, we need
# the logic below to reconcile the ids, i.e., apply IDs from (2) to (1).
backend = apigw_models.apigateway_backends[region_name]
if isinstance(resource, apigw_models.RestAPI):
backend.apis.pop(resource.id, None)
backend.apis[new_id] = resource
# We also need to fetch the resources to replace the root resource
# that moto automatically adds to newly created RestAPI objects
client = aws_stack.connect_to_service("apigateway")
resources = client.get_resources(restApiId=new_id, limit=500)["items"]
# make sure no resources have been added in addition to the root /
assert len(resource.resources) == 1
resource.resources = {}
for res in resources:
res_path_part = res.get("pathPart") or res.get("path")
child = resource.add_child(res_path_part, res.get("parentId"))
resource.resources.pop(child.id)
child.id = res["id"]
child.api_id = new_id
resource.resources[child.id] = child
resource.id = new_id
elif isinstance(resource, apigw_models.Resource):
api_id = props["RestApiId"]
backend.apis[api_id].resources.pop(resource.id, None)
backend.apis[api_id].resources[new_id] = resource
resource.id = new_id
elif isinstance(resource, apigw_models.Deployment):
api_id = props["RestApiId"]
backend.apis[api_id].deployments.pop(resource["id"], None)
backend.apis[api_id].deployments[new_id] = resource
resource["id"] = new_id
else:
LOG.warning(
"Unexpected resource type when updating ID: %s" % type(resource)
)
parse_and_create_resource_orig = parsing.parse_and_create_resource
parsing.parse_and_create_resource = parse_and_create_resource
parse_and_update_resource_orig = parsing.parse_and_update_resource
parsing.parse_and_update_resource = parse_and_update_resource
# Patch CloudFormation parse_output(..) method to fix a bug in moto
def parse_output(output_logical_id, output_json, resources_map):
try:
result = parse_output_orig(output_logical_id, output_json, resources_map)
except KeyError:
result = Output()
result.key = output_logical_id
result.value = None
result.description = output_json.get("Description")
# Make sure output includes export name
if not hasattr(result, "export_name"):
result.export_name = output_json.get("Export", {}).get("Name")
return result
parse_output_orig = parsing.parse_output
parsing.parse_output = parse_output
# Make sure the export name is returned for stack outputs
if "<ExportName>" not in responses.DESCRIBE_STACKS_TEMPLATE:
find = "</OutputValue>"
replace = """</OutputValue>
{% if output.export_name %}
<ExportName>{{ output.export_name }}</ExportName>
{% endif %}
"""
responses.DESCRIBE_STACKS_TEMPLATE = responses.DESCRIBE_STACKS_TEMPLATE.replace(
find, replace
)
# Patch DynamoDB get_cfn_attribute(..) method in moto
def DynamoDB_Table_get_cfn_attribute(self, attribute_name):
try:
return DynamoDB_Table_get_cfn_attribute_orig(self, attribute_name)
except Exception:
if attribute_name == "Arn":
return aws_stack.dynamodb_table_arn(table_name=self.name)
raise
DynamoDB_Table_get_cfn_attribute_orig = dynamodb_models.Table.get_cfn_attribute
dynamodb_models.Table.get_cfn_attribute = DynamoDB_Table_get_cfn_attribute
# Patch DynamoDB get_cfn_attribute(..) method in moto
def DynamoDB2_Table_get_cfn_attribute(self, attribute_name):
if attribute_name == "Arn":
return aws_stack.dynamodb_table_arn(table_name=self.name)
elif attribute_name == "StreamArn":
if (self.stream_specification or {}).get("StreamEnabled"):
return aws_stack.dynamodb_stream_arn(self.name, "latest")
return None
raise UnformattedGetAttTemplateException()
dynamodb2_models.Table.get_cfn_attribute = DynamoDB2_Table_get_cfn_attribute
# Patch SQS get_cfn_attribute(..) method in moto
def SQS_Queue_get_cfn_attribute(self, attribute_name):
if attribute_name in ["Arn", "QueueArn"]:
return aws_stack.sqs_queue_arn(queue_name=self.name)
return SQS_Queue_get_cfn_attribute_orig(self, attribute_name)
SQS_Queue_get_cfn_attribute_orig = sqs_models.Queue.get_cfn_attribute
sqs_models.Queue.get_cfn_attribute = SQS_Queue_get_cfn_attribute
# Patch S3 Bucket get_cfn_attribute(..) method in moto
def S3_Bucket_get_cfn_attribute(self, attribute_name):
if attribute_name in ["Arn"]:
return aws_stack.s3_bucket_arn(self.name)
return S3_Bucket_get_cfn_attribute_orig(self, attribute_name)
S3_Bucket_get_cfn_attribute_orig = s3_models.FakeBucket.get_cfn_attribute
s3_models.FakeBucket.get_cfn_attribute = S3_Bucket_get_cfn_attribute
# Patch SQS physical_resource_id(..) method in moto
@property
def SQS_Queue_physical_resource_id(self):
result = SQS_Queue_physical_resource_id_orig.fget(self)
if "://" not in result:
# convert ID to queue URL
return aws_stack.get_sqs_queue_url(result)
return result
SQS_Queue_physical_resource_id_orig = sqs_models.Queue.physical_resource_id
sqs_models.Queue.physical_resource_id = SQS_Queue_physical_resource_id
# Patch LogGroup get_cfn_attribute(..) method in moto
def LogGroup_get_cfn_attribute(self, attribute_name):
try:
return LogGroup_get_cfn_attribute_orig(self, attribute_name)
except Exception:
if attribute_name == "Arn":
return aws_stack.log_group_arn(self.name)
raise
LogGroup_get_cfn_attribute_orig = getattr(
cw_models.LogGroup, "get_cfn_attribute", None
)
cw_models.LogGroup.get_cfn_attribute = LogGroup_get_cfn_attribute
# Patch Lambda get_cfn_attribute(..) method in moto
def Lambda_Function_get_cfn_attribute(self, attribute_name):
try:
if attribute_name == "Arn":
return self.function_arn
return Lambda_Function_get_cfn_attribute_orig(self, attribute_name)
except Exception:
if attribute_name in ("Name", "FunctionName"):
return self.function_name
raise
Lambda_Function_get_cfn_attribute_orig = (
lambda_models.LambdaFunction.get_cfn_attribute
)
lambda_models.LambdaFunction.get_cfn_attribute = Lambda_Function_get_cfn_attribute
# Patch DynamoDB get_cfn_attribute(..) method in moto
def DynamoDB_Table_get_cfn_attribute(self, attribute_name):
try:
if attribute_name == "StreamArn":
streams = aws_stack.connect_to_service("dynamodbstreams").list_streams(
TableName=self.name
)["Streams"]
return streams[0]["StreamArn"] if streams else None
return DynamoDB_Table_get_cfn_attribute_orig(self, attribute_name)
except Exception as e:
LOG.warning(
'Unable to get attribute "%s" from resource %s: %s'
% (attribute_name, type(self), e)
)
raise
DynamoDB_Table_get_cfn_attribute_orig = dynamodb_models.Table.get_cfn_attribute
dynamodb_models.Table.get_cfn_attribute = DynamoDB_Table_get_cfn_attribute
# Patch IAM get_cfn_attribute(..) method in moto
def IAM_Role_get_cfn_attribute(self, attribute_name):
try:
return IAM_Role_get_cfn_attribute_orig(self, attribute_name)
except Exception:
if attribute_name == "Arn":
return aws_stack.role_arn(self.name)
raise
IAM_Role_get_cfn_attribute_orig = iam_models.Role.get_cfn_attribute
iam_models.Role.get_cfn_attribute = IAM_Role_get_cfn_attribute
# Patch SNS Topic get_cfn_attribute(..) method in moto
def SNS_Topic_get_cfn_attribute(self, attribute_name):
result = SNS_Topic_get_cfn_attribute_orig(self, attribute_name)
if attribute_name.lower() in ["arn", "topicarn"]:
result = aws_stack.fix_account_id_in_arns(result)
return result
SNS_Topic_get_cfn_attribute_orig = sns_models.Topic.get_cfn_attribute
sns_models.Topic.get_cfn_attribute = SNS_Topic_get_cfn_attribute
# Patch LambdaFunction create_from_cloudformation_json(..) method in moto
@classmethod
def Lambda_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
resource_name = (
cloudformation_json.get("Properties", {}).get("FunctionName")
or resource_name
)
return Lambda_create_from_cloudformation_json_orig(
resource_name, cloudformation_json, region_name
)
Lambda_create_from_cloudformation_json_orig = (
lambda_models.LambdaFunction.create_from_cloudformation_json
)
lambda_models.LambdaFunction.create_from_cloudformation_json = (
Lambda_create_from_cloudformation_json
)
# Patch EventSourceMapping create_from_cloudformation_json(..) method in moto
@classmethod
def Mapping_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json.get("Properties", {})
func_name = props.get("FunctionName") or ""
if ":lambda:" in func_name:
props["FunctionName"] = aws_stack.lambda_function_name(func_name)
return Mapping_create_from_cloudformation_json_orig(
resource_name, cloudformation_json, region_name
)
Mapping_create_from_cloudformation_json_orig = (
lambda_models.EventSourceMapping.create_from_cloudformation_json
)
lambda_models.EventSourceMapping.create_from_cloudformation_json = (
Mapping_create_from_cloudformation_json
)
# Patch LambdaFunction update_from_cloudformation_json(..) method in moto
@classmethod
def Lambda_update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
resource_name = (
cloudformation_json.get("Properties", {}).get("FunctionName")
or new_resource_name
)
return Lambda_create_from_cloudformation_json_orig(
resource_name, cloudformation_json, region_name
)
if not hasattr(lambda_models.LambdaFunction, "update_from_cloudformation_json"):
lambda_models.LambdaFunction.update_from_cloudformation_json = (
Lambda_update_from_cloudformation_json
)
# patch ApiGateway Deployment
def depl_delete_from_cloudformation_json(resource_name, resource_json, region_name):
properties = resource_json["Properties"]
LOG.info(
"TODO: apigateway.Deployment.delete_from_cloudformation_json %s"
% properties
)
if not hasattr(apigw_models.Deployment, "delete_from_cloudformation_json"):
apigw_models.Deployment.delete_from_cloudformation_json = (
depl_delete_from_cloudformation_json
)
# patch Lambda Version
def vers_delete_from_cloudformation_json(resource_name, resource_json, region_name):
properties = resource_json["Properties"]
LOG.info(
"TODO: apigateway.Deployment.delete_from_cloudformation_json %s"
% properties
)
if not hasattr(lambda_models.LambdaVersion, "delete_from_cloudformation_json"):
lambda_models.LambdaVersion.delete_from_cloudformation_json = (
vers_delete_from_cloudformation_json
)
# add CloudFormation types
@classmethod
def RestAPI_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
name = props["Name"]
region_name = props.get("Region") or aws_stack.get_region()
description = props.get("Description") or ""
id = props.get("Id") or short_uid()
return apigw_models.RestAPI(id, region_name, name, description)
def RestAPI_get_cfn_attribute(self, attribute_name):
if attribute_name == "Id":
return self.id
if attribute_name == "Region":
return self.region_name
if attribute_name == "Name":
return self.name
if attribute_name == "Description":
return self.description
if attribute_name == "RootResourceId":
for id, resource in self.resources.items():
if resource.parent_id is None:
return resource.id
return None
raise UnformattedGetAttTemplateException()
@classmethod
def Deployment_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
name = props["StageName"]
deployment_id = props.get("Id") or short_uid()
description = props.get("Description") or ""
return apigw_models.Deployment(deployment_id, name, description)
@classmethod
def Resource_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
region_name = props.get("Region") or aws_stack.get_region()
path_part = props.get("PathPart")
api_id = props.get("RestApiId")
parent_id = props.get("ParentId")
id = props.get("Id") or short_uid()
return apigw_models.Resource(id, region_name, api_id, path_part, parent_id)
@classmethod
def Method_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
method_type = props.get("HttpMethod")
authorization_type = props.get("AuthorizationType")
return apigw_models.Method(method_type, authorization_type)
apigw_models.RestAPI.create_from_cloudformation_json = (
RestAPI_create_from_cloudformation_json
)
apigw_models.RestAPI.get_cfn_attribute = RestAPI_get_cfn_attribute
apigw_models.Deployment.create_from_cloudformation_json = (
Deployment_create_from_cloudformation_json
)
apigw_models.Resource.create_from_cloudformation_json = (
Resource_create_from_cloudformation_json
)
apigw_models.Method.create_from_cloudformation_json = (
Method_create_from_cloudformation_json
)
# TODO: add support for AWS::ApiGateway::Model, AWS::ApiGateway::RequestValidator, ...
# fix AttributeError in moto's CloudFormation describe_stack_resource
def describe_stack_resource(self):
stack_name = self._get_param("StackName")
stack = self.cloudformation_backend.get_stack(stack_name)
logical_resource_id = self._get_param("LogicalResourceId")
if not stack:
msg = 'Unable to find CloudFormation stack "%s" in region %s' % (
stack_name,
aws_stack.get_region(),
)
if aws_stack.get_region() != self.region:
msg = "%s/%s" % (msg, self.region)
LOG.warning(msg)
response = aws_responses.flask_error_response(
msg, code=404, error_type="ResourceNotFoundException"
)
return 404, response.headers, response.data
for stack_resource in stack.stack_resources:
# Note: Line below has been patched
# if stack_resource.logical_resource_id == logical_resource_id:
if (
stack_resource
and stack_resource.logical_resource_id == logical_resource_id
):
resource = stack_resource
break
else:
raise ValidationError(logical_resource_id)
template = self.response_template(
responses.DESCRIBE_STACK_RESOURCE_RESPONSE_TEMPLATE
)
return template.render(stack=stack, resource=resource)
responses.CloudFormationResponse.describe_stack_resource = describe_stack_resource
# fix moto's describe_stack_events jinja2.exceptions.UndefinedError
def cf_describe_stack_events(self):
stack_name = self._get_param("StackName")
backend = self.cloudformation_backend
stack = backend.get_stack(stack_name)
if not stack:
# Also return stack events for deleted stacks, specified by stack name
stack = (
[
stk
for id, stk in backend.deleted_stacks.items()
if stk.name == stack_name
]
or [0]
)[0]
if not stack:
raise ValidationError(
stack_name,
message='Unable to find stack "%s" in region %s'
% (stack_name, aws_stack.get_region()),
)
template = self.response_template(responses.DESCRIBE_STACK_EVENTS_RESPONSE)
return template.render(stack=stack)
responses.CloudFormationResponse.describe_stack_events = cf_describe_stack_events
# fix Lambda regions in moto - see https://github.com/localstack/localstack/issues/1961
for region in boto3.session.Session().get_available_regions("lambda"):
if region not in lambda_models.lambda_backends:
lambda_models.lambda_backends[region] = lambda_models.LambdaBackend(region)
# patch FakeStack.initialize_resources
def initialize_resources(self):
def set_status(status):
self._add_stack_event(status)
self.status = status
self.resource_map.create()
self.output_map.create()
def run_loop(*args):
# NOTE: We're adding this additional loop, as it seems that in some cases moto
# does not consider resource dependencies (e.g., if a "DependsOn" resource property
# is defined). This loop allows us to incrementally resolve such dependencies.
resource_map = self.resource_map
unresolved = {}
for i in range(MAX_DEPENDENCY_DEPTH):
unresolved = getattr(resource_map, "_unresolved_resources", {})
if not unresolved:
set_status("CREATE_COMPLETE")
return resource_map
resource_map._unresolved_resources = {}
for resource_id, resource_details in unresolved.items():
# Re-trigger the resource creation
parse_and_create_resource(*resource_details, force_create=True)
if unresolved.keys() == resource_map._unresolved_resources.keys():
# looks like no more resources can be resolved -> bail
LOG.warning(
"Unresolvable dependencies, there may be undeployed stack resources: %s"
% unresolved
)
break
set_status("CREATE_FAILED")
raise Exception(
"Unable to resolve all CloudFormation resources after traversing "
+ "dependency tree (maximum depth %s reached): %s"
% (MAX_DEPENDENCY_DEPTH, unresolved.keys())
)
# NOTE: We're running the loop in the background, as it might take some time to complete
FuncThread(run_loop).start()
FakeStack.initialize_resources = initialize_resources
|
def apply_patches():
"""Apply patches to make LocalStack seamlessly interact with the moto backend.
TODO: Eventually, these patches should be contributed to the upstream repo!"""
# add model mappings to moto
parsing.MODEL_MAP.update(MODEL_MAP)
# Patch S3Backend.get_key method in moto to use S3 API from LocalStack
def get_key(self, bucket_name, key_name, version_id=None):
s3_client = aws_stack.connect_to_service("s3")
value = b""
if bucket_name != BUCKET_MARKER_LOCAL:
value = s3_client.get_object(Bucket=bucket_name, Key=key_name)[
"Body"
].read()
return s3_models.FakeKey(name=key_name, value=value)
s3_models.S3Backend.get_key = get_key
# Patch clean_json in moto
def clean_json(resource_json, resources_map):
result = clean_json_orig(resource_json, resources_map)
if isinstance(result, BaseModel):
if isinstance(resource_json, dict) and "Ref" in resource_json:
entity_id = get_entity_id(result, resource_json)
if entity_id:
return entity_id
LOG.warning(
'Unable to resolve "Ref" attribute for: %s - %s - %s',
resource_json,
result,
type(result),
)
return result
clean_json_orig = parsing.clean_json
parsing.clean_json = clean_json
# Patch parse_and_create_resource method in moto to deploy resources in LocalStack
def parse_and_create_resource(
logical_id, resource_json, resources_map, region_name, force_create=False
):
try:
return _parse_and_create_resource(
logical_id,
resource_json,
resources_map,
region_name,
force_create=force_create,
)
except Exception as e:
LOG.error(
'Unable to parse and create resource "%s": %s %s'
% (logical_id, e, traceback.format_exc())
)
raise
def parse_and_update_resource(
logical_id, resource_json, resources_map, region_name
):
try:
return _parse_and_create_resource(
logical_id, resource_json, resources_map, region_name, update=True
)
except Exception as e:
LOG.error(
'Unable to parse and update resource "%s": %s %s'
% (logical_id, e, traceback.format_exc())
)
raise
def _parse_and_create_resource(
logical_id,
resource_json,
resources_map,
region_name,
update=False,
force_create=False,
):
stack_name = resources_map.get("AWS::StackName")
resource_hash_key = (stack_name, logical_id)
# If the current stack is being updated, avoid infinite recursion
updating = CURRENTLY_UPDATING_RESOURCES.get(resource_hash_key)
LOG.debug(
"Currently processing stack resource %s/%s: %s"
% (stack_name, logical_id, updating)
)
if updating:
return None
# parse and get final resource JSON
resource_tuple = parsing.parse_resource(
logical_id, resource_json, resources_map
)
if not resource_tuple:
return None
_, resource_json, _ = resource_tuple
# add some missing default props which otherwise cause deployments to fail
props = resource_json["Properties"] = resource_json.get("Properties") or {}
if resource_json["Type"] == "AWS::Lambda::EventSourceMapping" and not props.get(
"StartingPosition"
):
props["StartingPosition"] = "LATEST"
# check if this resource already exists in the resource map
resource = resources_map._parsed_resources.get(logical_id)
if resource and not update and not force_create:
return resource
# check whether this resource needs to be deployed
resource_map_new = dict(resources_map._resource_json_map)
resource_map_new[logical_id] = resource_json
should_be_created = template_deployer.should_be_deployed(
logical_id, resource_map_new, stack_name
)
# fix resource ARNs, make sure to convert account IDs 000000000000 to 123456789012
resource_json_arns_fixed = clone(json_safe(convert_objs_to_ids(resource_json)))
set_moto_account_ids(resource_json_arns_fixed)
# create resource definition and store CloudFormation metadata in moto
if (resource or update) and not force_create:
parse_and_update_resource_orig(
logical_id, resource_json_arns_fixed, resources_map, region_name
)
elif not resource:
try:
resource = parse_and_create_resource_orig(
logical_id, resource_json_arns_fixed, resources_map, region_name
)
except Exception as e:
if should_be_created:
raise
else:
LOG.info(
"Error on moto CF resource creation. Ignoring, as should_be_created=%s: %s"
% (should_be_created, e)
)
# Fix for moto which sometimes hard-codes region name as 'us-east-1'
if hasattr(resource, "region_name") and resource.region_name != region_name:
LOG.debug(
"Updating incorrect region from %s to %s"
% (resource.region_name, region_name)
)
resource.region_name = region_name
# check whether this resource needs to be deployed
is_updateable = False
if not should_be_created:
# This resource is either not deployable or already exists. Check if it can be updated
is_updateable = template_deployer.is_updateable(
logical_id, resource_map_new, stack_name
)
if not update or not is_updateable:
all_satisfied = template_deployer.all_resource_dependencies_satisfied(
logical_id, resource_map_new, stack_name
)
if not all_satisfied:
LOG.info(
"Resource %s cannot be deployed, found unsatisfied dependencies. %s"
% (logical_id, resource_json)
)
details = [logical_id, resource_json, resources_map, region_name]
resources_map._unresolved_resources = getattr(
resources_map, "_unresolved_resources", {}
)
resources_map._unresolved_resources[logical_id] = details
else:
LOG.debug(
"Resource %s need not be deployed (is_updateable=%s): %s %s"
% (logical_id, is_updateable, resource_json, bool(resource))
)
# Return if this resource already exists and can/need not be updated yet
# NOTE: We should always return the resource here, to avoid duplicate
# creation of resources in moto!
return resource
# Apply some fixes/patches to the resource names, then deploy resource in LocalStack
update_resource_name(resource, resource_json)
LOG.debug(
"Deploying CloudFormation resource (update=%s, exists=%s, updateable=%s): %s"
% (update, not should_be_created, is_updateable, resource_json)
)
try:
CURRENTLY_UPDATING_RESOURCES[resource_hash_key] = True
deploy_func = (
template_deployer.update_resource
if update
else template_deployer.deploy_resource
)
result = deploy_func(logical_id, resource_map_new, stack_name=stack_name)
finally:
CURRENTLY_UPDATING_RESOURCES[resource_hash_key] = False
if not should_be_created:
# skip the parts below for update requests
return resource
def find_id(resource):
"""Find ID of the given resource."""
if not resource:
return
for id_attr in (
"Id",
"id",
"ResourceId",
"RestApiId",
"DeploymentId",
"RoleId",
):
if id_attr in resource:
return resource[id_attr]
# update resource IDs to avoid mismatch between CF moto and LocalStack backend resources
if hasattr(resource, "id") or (
isinstance(resource, dict) and resource.get("id")
):
existing_id = resource.id if hasattr(resource, "id") else resource["id"]
new_res_id = find_id(result)
LOG.debug(
"Updating resource id: %s - %s, %s - %s"
% (existing_id, new_res_id, resource, resource_json)
)
if new_res_id:
LOG.info(
"Updating resource ID from %s to %s (%s)"
% (existing_id, new_res_id, region_name)
)
update_resource_id(resource, new_res_id, props, region_name)
else:
LOG.warning(
"Unable to extract id for resource %s: %s" % (logical_id, result)
)
# update physical_resource_id field
update_physical_resource_id(resource)
return resource
def update_resource_name(resource, resource_json):
"""Some resources require minor fixes in their CF resource definition
before we can pass them on to deployment."""
props = resource_json["Properties"] = resource_json.get("Properties") or {}
if isinstance(resource, sfn_models.StateMachine) and not props.get(
"StateMachineName"
):
props["StateMachineName"] = resource.name
def update_resource_id(resource, new_id, props, region_name):
"""Update and fix the ID(s) of the given resource."""
# NOTE: this is a bit of a hack, which is required because
# of the order of events when CloudFormation resources are created.
# When we process a request to create a CF resource that's part of a
# stack, say, an API Gateway Resource, then we (1) create the object
# in memory in moto, which generates a random ID for the resource, and
# (2) create the actual resource in the backend service using
# template_deployer.deploy_resource(..) (see above).
# The resource created in (2) now has a different ID than the resource
# created in (1), which leads to downstream problems. Hence, we need
# the logic below to reconcile the ids, i.e., apply IDs from (2) to (1).
backend = apigw_models.apigateway_backends[region_name]
if isinstance(resource, apigw_models.RestAPI):
backend.apis.pop(resource.id, None)
backend.apis[new_id] = resource
# We also need to fetch the resources to replace the root resource
# that moto automatically adds to newly created RestAPI objects
client = aws_stack.connect_to_service("apigateway")
resources = client.get_resources(restApiId=new_id, limit=500)["items"]
# make sure no resources have been added in addition to the root /
assert len(resource.resources) == 1
resource.resources = {}
for res in resources:
res_path_part = res.get("pathPart") or res.get("path")
child = resource.add_child(res_path_part, res.get("parentId"))
resource.resources.pop(child.id)
child.id = res["id"]
child.api_id = new_id
resource.resources[child.id] = child
resource.id = new_id
elif isinstance(resource, apigw_models.Resource):
api_id = props["RestApiId"]
backend.apis[api_id].resources.pop(resource.id, None)
backend.apis[api_id].resources[new_id] = resource
resource.id = new_id
elif isinstance(resource, apigw_models.Deployment):
api_id = props["RestApiId"]
backend.apis[api_id].deployments.pop(resource["id"], None)
backend.apis[api_id].deployments[new_id] = resource
resource["id"] = new_id
else:
LOG.warning(
"Unexpected resource type when updating ID: %s" % type(resource)
)
parse_and_create_resource_orig = parsing.parse_and_create_resource
parsing.parse_and_create_resource = parse_and_create_resource
parse_and_update_resource_orig = parsing.parse_and_update_resource
parsing.parse_and_update_resource = parse_and_update_resource
# Patch CloudFormation parse_output(..) method to fix a bug in moto
def parse_output(output_logical_id, output_json, resources_map):
try:
result = parse_output_orig(output_logical_id, output_json, resources_map)
except KeyError:
result = Output()
result.key = output_logical_id
result.value = None
result.description = output_json.get("Description")
# Make sure output includes export name
if not hasattr(result, "export_name"):
result.export_name = output_json.get("Export", {}).get("Name")
return result
parse_output_orig = parsing.parse_output
parsing.parse_output = parse_output
# Make sure the export name is returned for stack outputs
if "<ExportName>" not in responses.DESCRIBE_STACKS_TEMPLATE:
find = "</OutputValue>"
replace = """</OutputValue>
{% if output.export_name %}
<ExportName>{{ output.export_name }}</ExportName>
{% endif %}
"""
responses.DESCRIBE_STACKS_TEMPLATE = responses.DESCRIBE_STACKS_TEMPLATE.replace(
find, replace
)
# Patch DynamoDB get_cfn_attribute(..) method in moto
def DynamoDB_Table_get_cfn_attribute(self, attribute_name):
try:
return DynamoDB_Table_get_cfn_attribute_orig(self, attribute_name)
except Exception:
if attribute_name == "Arn":
return aws_stack.dynamodb_table_arn(table_name=self.name)
raise
DynamoDB_Table_get_cfn_attribute_orig = dynamodb_models.Table.get_cfn_attribute
dynamodb_models.Table.get_cfn_attribute = DynamoDB_Table_get_cfn_attribute
# Patch DynamoDB get_cfn_attribute(..) method in moto
def DynamoDB2_Table_get_cfn_attribute(self, attribute_name):
if attribute_name == "Arn":
return aws_stack.dynamodb_table_arn(table_name=self.name)
elif attribute_name == "StreamArn":
if (self.stream_specification or {}).get("StreamEnabled"):
return aws_stack.dynamodb_stream_arn(self.name, "latest")
return None
raise UnformattedGetAttTemplateException()
dynamodb2_models.Table.get_cfn_attribute = DynamoDB2_Table_get_cfn_attribute
# Patch SQS get_cfn_attribute(..) method in moto
def SQS_Queue_get_cfn_attribute(self, attribute_name):
if attribute_name in ["Arn", "QueueArn"]:
return aws_stack.sqs_queue_arn(queue_name=self.name)
return SQS_Queue_get_cfn_attribute_orig(self, attribute_name)
SQS_Queue_get_cfn_attribute_orig = sqs_models.Queue.get_cfn_attribute
sqs_models.Queue.get_cfn_attribute = SQS_Queue_get_cfn_attribute
# Patch S3 Bucket get_cfn_attribute(..) method in moto
def S3_Bucket_get_cfn_attribute(self, attribute_name):
if attribute_name in ["Arn"]:
return aws_stack.s3_bucket_arn(self.name)
return S3_Bucket_get_cfn_attribute_orig(self, attribute_name)
S3_Bucket_get_cfn_attribute_orig = s3_models.FakeBucket.get_cfn_attribute
s3_models.FakeBucket.get_cfn_attribute = S3_Bucket_get_cfn_attribute
# Patch SQS physical_resource_id(..) method in moto
@property
def SQS_Queue_physical_resource_id(self):
result = SQS_Queue_physical_resource_id_orig.fget(self)
if "://" not in result:
# convert ID to queue URL
return aws_stack.get_sqs_queue_url(result)
return result
SQS_Queue_physical_resource_id_orig = sqs_models.Queue.physical_resource_id
sqs_models.Queue.physical_resource_id = SQS_Queue_physical_resource_id
# Patch LogGroup get_cfn_attribute(..) method in moto
def LogGroup_get_cfn_attribute(self, attribute_name):
try:
return LogGroup_get_cfn_attribute_orig(self, attribute_name)
except Exception:
if attribute_name == "Arn":
return aws_stack.log_group_arn(self.name)
raise
LogGroup_get_cfn_attribute_orig = getattr(
cw_models.LogGroup, "get_cfn_attribute", None
)
cw_models.LogGroup.get_cfn_attribute = LogGroup_get_cfn_attribute
# Patch Lambda get_cfn_attribute(..) method in moto
def Lambda_Function_get_cfn_attribute(self, attribute_name):
try:
if attribute_name == "Arn":
return self.function_arn
return Lambda_Function_get_cfn_attribute_orig(self, attribute_name)
except Exception:
if attribute_name in ("Name", "FunctionName"):
return self.function_name
raise
Lambda_Function_get_cfn_attribute_orig = (
lambda_models.LambdaFunction.get_cfn_attribute
)
lambda_models.LambdaFunction.get_cfn_attribute = Lambda_Function_get_cfn_attribute
# Patch DynamoDB get_cfn_attribute(..) method in moto
def DynamoDB_Table_get_cfn_attribute(self, attribute_name):
try:
if attribute_name == "StreamArn":
streams = aws_stack.connect_to_service("dynamodbstreams").list_streams(
TableName=self.name
)["Streams"]
return streams[0]["StreamArn"] if streams else None
return DynamoDB_Table_get_cfn_attribute_orig(self, attribute_name)
except Exception as e:
LOG.warning(
'Unable to get attribute "%s" from resource %s: %s'
% (attribute_name, type(self), e)
)
raise
DynamoDB_Table_get_cfn_attribute_orig = dynamodb_models.Table.get_cfn_attribute
dynamodb_models.Table.get_cfn_attribute = DynamoDB_Table_get_cfn_attribute
# Patch IAM get_cfn_attribute(..) method in moto
def IAM_Role_get_cfn_attribute(self, attribute_name):
try:
return IAM_Role_get_cfn_attribute_orig(self, attribute_name)
except Exception:
if attribute_name == "Arn":
return aws_stack.role_arn(self.name)
raise
IAM_Role_get_cfn_attribute_orig = iam_models.Role.get_cfn_attribute
iam_models.Role.get_cfn_attribute = IAM_Role_get_cfn_attribute
# Patch SNS Topic get_cfn_attribute(..) method in moto
def SNS_Topic_get_cfn_attribute(self, attribute_name):
result = SNS_Topic_get_cfn_attribute_orig(self, attribute_name)
if attribute_name.lower() in ["arn", "topicarn"]:
result = aws_stack.fix_account_id_in_arns(result)
return result
SNS_Topic_get_cfn_attribute_orig = sns_models.Topic.get_cfn_attribute
sns_models.Topic.get_cfn_attribute = SNS_Topic_get_cfn_attribute
# Patch LambdaFunction create_from_cloudformation_json(..) method in moto
@classmethod
def Lambda_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
resource_name = (
cloudformation_json.get("Properties", {}).get("FunctionName")
or resource_name
)
return Lambda_create_from_cloudformation_json_orig(
resource_name, cloudformation_json, region_name
)
Lambda_create_from_cloudformation_json_orig = (
lambda_models.LambdaFunction.create_from_cloudformation_json
)
lambda_models.LambdaFunction.create_from_cloudformation_json = (
Lambda_create_from_cloudformation_json
)
# Patch EventSourceMapping create_from_cloudformation_json(..) method in moto
@classmethod
def Mapping_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json.get("Properties", {})
func_name = props.get("FunctionName") or ""
if ":lambda:" in func_name:
props["FunctionName"] = aws_stack.lambda_function_name(func_name)
return Mapping_create_from_cloudformation_json_orig(
resource_name, cloudformation_json, region_name
)
Mapping_create_from_cloudformation_json_orig = (
lambda_models.EventSourceMapping.create_from_cloudformation_json
)
lambda_models.EventSourceMapping.create_from_cloudformation_json = (
Mapping_create_from_cloudformation_json
)
# Patch LambdaFunction update_from_cloudformation_json(..) method in moto
@classmethod
def Lambda_update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
resource_name = (
cloudformation_json.get("Properties", {}).get("FunctionName")
or new_resource_name
)
return Lambda_create_from_cloudformation_json_orig(
resource_name, cloudformation_json, region_name
)
if not hasattr(lambda_models.LambdaFunction, "update_from_cloudformation_json"):
lambda_models.LambdaFunction.update_from_cloudformation_json = (
Lambda_update_from_cloudformation_json
)
# patch ApiGateway Deployment
def depl_delete_from_cloudformation_json(resource_name, resource_json, region_name):
properties = resource_json["Properties"]
LOG.info(
"TODO: apigateway.Deployment.delete_from_cloudformation_json %s"
% properties
)
if not hasattr(apigw_models.Deployment, "delete_from_cloudformation_json"):
apigw_models.Deployment.delete_from_cloudformation_json = (
depl_delete_from_cloudformation_json
)
# patch Lambda Version
def vers_delete_from_cloudformation_json(resource_name, resource_json, region_name):
properties = resource_json["Properties"]
LOG.info(
"TODO: apigateway.Deployment.delete_from_cloudformation_json %s"
% properties
)
if not hasattr(lambda_models.LambdaVersion, "delete_from_cloudformation_json"):
lambda_models.LambdaVersion.delete_from_cloudformation_json = (
vers_delete_from_cloudformation_json
)
# add CloudFormation types
@classmethod
def RestAPI_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
name = props["Name"]
region_name = props.get("Region") or aws_stack.get_region()
description = props.get("Description") or ""
id = props.get("Id") or short_uid()
return apigw_models.RestAPI(id, region_name, name, description)
def RestAPI_get_cfn_attribute(self, attribute_name):
if attribute_name == "Id":
return self.id
if attribute_name == "Region":
return self.region_name
if attribute_name == "Name":
return self.name
if attribute_name == "Description":
return self.description
if attribute_name == "RootResourceId":
for id, resource in self.resources.items():
if resource.parent_id is None:
return resource.id
return None
raise UnformattedGetAttTemplateException()
@classmethod
def Deployment_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
name = props["StageName"]
deployment_id = props.get("Id") or short_uid()
description = props.get("Description") or ""
return apigw_models.Deployment(deployment_id, name, description)
@classmethod
def Resource_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
region_name = props.get("Region") or aws_stack.get_region()
path_part = props.get("PathPart")
api_id = props.get("RestApiId")
parent_id = props.get("ParentId")
id = props.get("Id") or short_uid()
return apigw_models.Resource(id, region_name, api_id, path_part, parent_id)
@classmethod
def Method_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
method_type = props.get("HttpMethod")
authorization_type = props.get("AuthorizationType")
return apigw_models.Method(method_type, authorization_type)
apigw_models.RestAPI.create_from_cloudformation_json = (
RestAPI_create_from_cloudformation_json
)
apigw_models.RestAPI.get_cfn_attribute = RestAPI_get_cfn_attribute
apigw_models.Deployment.create_from_cloudformation_json = (
Deployment_create_from_cloudformation_json
)
apigw_models.Resource.create_from_cloudformation_json = (
Resource_create_from_cloudformation_json
)
apigw_models.Method.create_from_cloudformation_json = (
Method_create_from_cloudformation_json
)
# TODO: add support for AWS::ApiGateway::Model, AWS::ApiGateway::RequestValidator, ...
# fix AttributeError in moto's CloudFormation describe_stack_resource
def describe_stack_resource(self):
stack_name = self._get_param("StackName")
stack = self.cloudformation_backend.get_stack(stack_name)
logical_resource_id = self._get_param("LogicalResourceId")
if not stack:
msg = 'Unable to find CloudFormation stack "%s" in region %s' % (
stack_name,
aws_stack.get_region(),
)
if aws_stack.get_region() != self.region:
msg = "%s/%s" % (msg, self.region)
LOG.warning(msg)
response = aws_responses.flask_error_response(
msg, code=404, error_type="ResourceNotFoundException"
)
return 404, response.headers, response.data
for stack_resource in stack.stack_resources:
# Note: Line below has been patched
# if stack_resource.logical_resource_id == logical_resource_id:
if (
stack_resource
and stack_resource.logical_resource_id == logical_resource_id
):
resource = stack_resource
break
else:
raise ValidationError(logical_resource_id)
template = self.response_template(
responses.DESCRIBE_STACK_RESOURCE_RESPONSE_TEMPLATE
)
return template.render(stack=stack, resource=resource)
responses.CloudFormationResponse.describe_stack_resource = describe_stack_resource
# fix moto's describe_stack_events jinja2.exceptions.UndefinedError
def cf_describe_stack_events(self):
stack_name = self._get_param("StackName")
backend = self.cloudformation_backend
stack = backend.get_stack(stack_name)
if not stack:
# Also return stack events for deleted stacks, specified by stack name
stack = (
[
stk
for id, stk in backend.deleted_stacks.items()
if stk.name == stack_name
]
or [0]
)[0]
if not stack:
raise ValidationError(
stack_name,
message='Unable to find stack "%s" in region %s'
% (stack_name, aws_stack.get_region()),
)
template = self.response_template(responses.DESCRIBE_STACK_EVENTS_RESPONSE)
return template.render(stack=stack)
responses.CloudFormationResponse.describe_stack_events = cf_describe_stack_events
# fix Lambda regions in moto - see https://github.com/localstack/localstack/issues/1961
for region in boto3.session.Session().get_available_regions("lambda"):
if region not in lambda_models.lambda_backends:
lambda_models.lambda_backends[region] = lambda_models.LambdaBackend(region)
# patch FakeStack.initialize_resources
def initialize_resources(self):
def set_status(status):
self._add_stack_event(status)
self.status = status
self.resource_map.create()
self.output_map.create()
def run_loop(*args):
# NOTE: We're adding this additional loop, as it seems that in some cases moto
# does not consider resource dependencies (e.g., if a "DependsOn" resource property
# is defined). This loop allows us to incrementally resolve such dependencies.
resource_map = self.resource_map
for i in range(MAX_DEPENDENCY_DEPTH):
unresolved = getattr(resource_map, "_unresolved_resources", {})
if not unresolved:
set_status("CREATE_COMPLETE")
return resource_map
resource_map._unresolved_resources = {}
for resource_id, resource_details in unresolved.items():
# Re-trigger the resource creation
parse_and_create_resource(*resource_details, force_create=True)
if unresolved.keys() == resource_map._unresolved_resources.keys():
# looks like no more resources can be resolved -> bail
LOG.warning(
"Unresolvable dependencies, there may be undeployed stack resources: %s"
% unresolved
)
break
set_status("CREATE_FAILED")
raise Exception(
"Unable to resolve all CloudFormation resources after traversing "
+ "dependency tree (maximum depth %s reached)" % MAX_DEPENDENCY_DEPTH
)
# NOTE: We're running the loop in the background, as it might take some time to complete
FuncThread(run_loop).start()
FakeStack.initialize_resources = initialize_resources
|
https://github.com/localstack/localstack/issues/2020
|
2020-02-05T17:05:30:ERROR:flask.app: Exception on / [POST]
Traceback (most recent call last):
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask_cors/extension.py", line 161, in wrapped_function
return cors_after_request(app.make_response(f(*args, **kwargs)))
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/_compat.py", line 35, in reraise
raise value
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/opt/code/localstack/localstack/services/firehose/firehose_api.py", line 276, in post_request
es_update=es_update, version_id=version_id)
TypeError: update_destination() got an unexpected keyword argument 'es_update'
|
TypeError
|
def initialize_resources(self):
def set_status(status):
self._add_stack_event(status)
self.status = status
self.resource_map.create()
self.output_map.create()
def run_loop(*args):
# NOTE: We're adding this additional loop, as it seems that in some cases moto
# does not consider resource dependencies (e.g., if a "DependsOn" resource property
# is defined). This loop allows us to incrementally resolve such dependencies.
resource_map = self.resource_map
unresolved = {}
for i in range(MAX_DEPENDENCY_DEPTH):
unresolved = getattr(resource_map, "_unresolved_resources", {})
if not unresolved:
set_status("CREATE_COMPLETE")
return resource_map
resource_map._unresolved_resources = {}
for resource_id, resource_details in unresolved.items():
# Re-trigger the resource creation
parse_and_create_resource(*resource_details, force_create=True)
if unresolved.keys() == resource_map._unresolved_resources.keys():
# looks like no more resources can be resolved -> bail
LOG.warning(
"Unresolvable dependencies, there may be undeployed stack resources: %s"
% unresolved
)
break
set_status("CREATE_FAILED")
raise Exception(
"Unable to resolve all CloudFormation resources after traversing "
+ "dependency tree (maximum depth %s reached): %s"
% (MAX_DEPENDENCY_DEPTH, unresolved.keys())
)
# NOTE: We're running the loop in the background, as it might take some time to complete
FuncThread(run_loop).start()
|
def initialize_resources(self):
def set_status(status):
self._add_stack_event(status)
self.status = status
self.resource_map.create()
self.output_map.create()
def run_loop(*args):
# NOTE: We're adding this additional loop, as it seems that in some cases moto
# does not consider resource dependencies (e.g., if a "DependsOn" resource property
# is defined). This loop allows us to incrementally resolve such dependencies.
resource_map = self.resource_map
for i in range(MAX_DEPENDENCY_DEPTH):
unresolved = getattr(resource_map, "_unresolved_resources", {})
if not unresolved:
set_status("CREATE_COMPLETE")
return resource_map
resource_map._unresolved_resources = {}
for resource_id, resource_details in unresolved.items():
# Re-trigger the resource creation
parse_and_create_resource(*resource_details, force_create=True)
if unresolved.keys() == resource_map._unresolved_resources.keys():
# looks like no more resources can be resolved -> bail
LOG.warning(
"Unresolvable dependencies, there may be undeployed stack resources: %s"
% unresolved
)
break
set_status("CREATE_FAILED")
raise Exception(
"Unable to resolve all CloudFormation resources after traversing "
+ "dependency tree (maximum depth %s reached)" % MAX_DEPENDENCY_DEPTH
)
# NOTE: We're running the loop in the background, as it might take some time to complete
FuncThread(run_loop).start()
|
https://github.com/localstack/localstack/issues/2020
|
2020-02-05T17:05:30:ERROR:flask.app: Exception on / [POST]
Traceback (most recent call last):
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask_cors/extension.py", line 161, in wrapped_function
return cors_after_request(app.make_response(f(*args, **kwargs)))
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/_compat.py", line 35, in reraise
raise value
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/opt/code/localstack/localstack/services/firehose/firehose_api.py", line 276, in post_request
es_update=es_update, version_id=version_id)
TypeError: update_destination() got an unexpected keyword argument 'es_update'
|
TypeError
|
def run_loop(*args):
# NOTE: We're adding this additional loop, as it seems that in some cases moto
# does not consider resource dependencies (e.g., if a "DependsOn" resource property
# is defined). This loop allows us to incrementally resolve such dependencies.
resource_map = self.resource_map
unresolved = {}
for i in range(MAX_DEPENDENCY_DEPTH):
unresolved = getattr(resource_map, "_unresolved_resources", {})
if not unresolved:
set_status("CREATE_COMPLETE")
return resource_map
resource_map._unresolved_resources = {}
for resource_id, resource_details in unresolved.items():
# Re-trigger the resource creation
parse_and_create_resource(*resource_details, force_create=True)
if unresolved.keys() == resource_map._unresolved_resources.keys():
# looks like no more resources can be resolved -> bail
LOG.warning(
"Unresolvable dependencies, there may be undeployed stack resources: %s"
% unresolved
)
break
set_status("CREATE_FAILED")
raise Exception(
"Unable to resolve all CloudFormation resources after traversing "
+ "dependency tree (maximum depth %s reached): %s"
% (MAX_DEPENDENCY_DEPTH, unresolved.keys())
)
|
def run_loop(*args):
# NOTE: We're adding this additional loop, as it seems that in some cases moto
# does not consider resource dependencies (e.g., if a "DependsOn" resource property
# is defined). This loop allows us to incrementally resolve such dependencies.
resource_map = self.resource_map
for i in range(MAX_DEPENDENCY_DEPTH):
unresolved = getattr(resource_map, "_unresolved_resources", {})
if not unresolved:
set_status("CREATE_COMPLETE")
return resource_map
resource_map._unresolved_resources = {}
for resource_id, resource_details in unresolved.items():
# Re-trigger the resource creation
parse_and_create_resource(*resource_details, force_create=True)
if unresolved.keys() == resource_map._unresolved_resources.keys():
# looks like no more resources can be resolved -> bail
LOG.warning(
"Unresolvable dependencies, there may be undeployed stack resources: %s"
% unresolved
)
break
set_status("CREATE_FAILED")
raise Exception(
"Unable to resolve all CloudFormation resources after traversing "
+ "dependency tree (maximum depth %s reached)" % MAX_DEPENDENCY_DEPTH
)
|
https://github.com/localstack/localstack/issues/2020
|
2020-02-05T17:05:30:ERROR:flask.app: Exception on / [POST]
Traceback (most recent call last):
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask_cors/extension.py", line 161, in wrapped_function
return cors_after_request(app.make_response(f(*args, **kwargs)))
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/_compat.py", line 35, in reraise
raise value
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/opt/code/localstack/localstack/services/firehose/firehose_api.py", line 276, in post_request
es_update=es_update, version_id=version_id)
TypeError: update_destination() got an unexpected keyword argument 'es_update'
|
TypeError
|
def start_elasticsearch_instance():
# Note: keep imports here to avoid circular dependencies
from localstack.services.es import es_starter
from localstack.services.infra import check_infra, Plugin
api_name = "elasticsearch"
plugin = Plugin(
api_name,
start=es_starter.start_elasticsearch,
check=es_starter.check_elasticsearch,
)
t1 = plugin.start(asynchronous=True)
# sleep some time to give Elasticsearch enough time to come up
time.sleep(8)
apis = [api_name]
# ensure that all infra components are up and running
check_infra(apis=apis, additional_checks=[es_starter.check_elasticsearch])
return t1
|
def start_elasticsearch_instance():
# Note: keep imports here to avoid circular dependencies
from localstack.services.es import es_starter
from localstack.services.infra import check_infra, Plugin
api_name = "elasticsearch"
plugin = Plugin(
api_name,
start=es_starter.start_elasticsearch,
check=es_starter.check_elasticsearch,
)
t1 = plugin.start(asynchronous=True)
# sleep some time to give Elasticsearch enough time to come up
time.sleep(8)
apis = [api_name]
# ensure that all infra components are up and running
check_infra(apis=apis, additional_checks=[es_starter.check_elasticsearch])
# restore persisted data
persistence.restore_persisted_data(apis=apis)
return t1
|
https://github.com/localstack/localstack/issues/2020
|
2020-02-05T17:05:30:ERROR:flask.app: Exception on / [POST]
Traceback (most recent call last):
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask_cors/extension.py", line 161, in wrapped_function
return cors_after_request(app.make_response(f(*args, **kwargs)))
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/_compat.py", line 35, in reraise
raise value
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/opt/code/localstack/localstack/services/firehose/firehose_api.py", line 276, in post_request
es_update=es_update, version_id=version_id)
TypeError: update_destination() got an unexpected keyword argument 'es_update'
|
TypeError
|
def post_request():
action = request.headers.get("x-amz-target")
data = json.loads(to_str(request.data))
response = None
if action == "%s.ListDeliveryStreams" % ACTION_HEADER_PREFIX:
response = {
"DeliveryStreamNames": get_delivery_stream_names(),
"HasMoreDeliveryStreams": False,
}
elif action == "%s.CreateDeliveryStream" % ACTION_HEADER_PREFIX:
stream_name = data["DeliveryStreamName"]
region_name = extract_region_from_auth_header(request.headers)
response = create_stream(
stream_name,
delivery_stream_type=data.get("DeliveryStreamType"),
delivery_stream_type_configuration=data.get(
"KinesisStreamSourceConfiguration"
),
s3_destination=data.get("S3DestinationConfiguration"),
elasticsearch_destination=data.get("ElasticsearchDestinationConfiguration"),
tags=data.get("Tags"),
region_name=region_name,
)
elif action == "%s.DeleteDeliveryStream" % ACTION_HEADER_PREFIX:
stream_name = data["DeliveryStreamName"]
response = delete_stream(stream_name)
elif action == "%s.DescribeDeliveryStream" % ACTION_HEADER_PREFIX:
stream_name = data["DeliveryStreamName"]
response = get_stream(stream_name)
if not response:
return error_not_found(stream_name)
response = {"DeliveryStreamDescription": response}
elif action == "%s.PutRecord" % ACTION_HEADER_PREFIX:
stream_name = data["DeliveryStreamName"]
record = data["Record"]
put_record(stream_name, record)
response = {"RecordId": str(uuid.uuid4())}
elif action == "%s.PutRecordBatch" % ACTION_HEADER_PREFIX:
stream_name = data["DeliveryStreamName"]
records = data["Records"]
put_records(stream_name, records)
request_responses = []
for i in records:
request_responses.append({"RecordId": str(uuid.uuid4())})
response = {"FailedPutCount": 0, "RequestResponses": request_responses}
elif action == "%s.UpdateDestination" % ACTION_HEADER_PREFIX:
stream_name = data["DeliveryStreamName"]
version_id = data["CurrentDeliveryStreamVersionId"]
destination_id = data["DestinationId"]
s3_update = (
data["S3DestinationUpdate"] if "S3DestinationUpdate" in data else None
)
update_destination(
stream_name=stream_name,
destination_id=destination_id,
s3_update=s3_update,
version_id=version_id,
)
es_update = (
data["ESDestinationUpdate"] if "ESDestinationUpdate" in data else None
)
update_destination(
stream_name=stream_name,
destination_id=destination_id,
elasticsearch_update=es_update,
version_id=version_id,
)
response = {}
elif action == "%s.ListTagsForDeliveryStream" % ACTION_HEADER_PREFIX:
response = get_delivery_stream_tags(
data["DeliveryStreamName"],
data.get("ExclusiveStartTagKey"),
data.get("Limit", 50),
)
else:
response = error_response(
'Unknown action "%s"' % action, code=400, error_type="InvalidAction"
)
if isinstance(response, dict):
response = jsonify(response)
return response
|
def post_request():
action = request.headers.get("x-amz-target")
data = json.loads(to_str(request.data))
response = None
if action == "%s.ListDeliveryStreams" % ACTION_HEADER_PREFIX:
response = {
"DeliveryStreamNames": get_delivery_stream_names(),
"HasMoreDeliveryStreams": False,
}
elif action == "%s.CreateDeliveryStream" % ACTION_HEADER_PREFIX:
stream_name = data["DeliveryStreamName"]
region_name = extract_region_from_auth_header(request.headers)
response = create_stream(
stream_name,
delivery_stream_type=data.get("DeliveryStreamType"),
delivery_stream_type_configuration=data.get(
"KinesisStreamSourceConfiguration"
),
s3_destination=data.get("S3DestinationConfiguration"),
elasticsearch_destination=data.get("ElasticsearchDestinationConfiguration"),
tags=data.get("Tags"),
region_name=region_name,
)
elif action == "%s.DeleteDeliveryStream" % ACTION_HEADER_PREFIX:
stream_name = data["DeliveryStreamName"]
response = delete_stream(stream_name)
elif action == "%s.DescribeDeliveryStream" % ACTION_HEADER_PREFIX:
stream_name = data["DeliveryStreamName"]
response = get_stream(stream_name)
if not response:
return error_not_found(stream_name)
response = {"DeliveryStreamDescription": response}
elif action == "%s.PutRecord" % ACTION_HEADER_PREFIX:
stream_name = data["DeliveryStreamName"]
record = data["Record"]
put_record(stream_name, record)
response = {"RecordId": str(uuid.uuid4())}
elif action == "%s.PutRecordBatch" % ACTION_HEADER_PREFIX:
stream_name = data["DeliveryStreamName"]
records = data["Records"]
put_records(stream_name, records)
request_responses = []
for i in records:
request_responses.append({"RecordId": str(uuid.uuid4())})
response = {"FailedPutCount": 0, "RequestResponses": request_responses}
elif action == "%s.UpdateDestination" % ACTION_HEADER_PREFIX:
stream_name = data["DeliveryStreamName"]
version_id = data["CurrentDeliveryStreamVersionId"]
destination_id = data["DestinationId"]
s3_update = (
data["S3DestinationUpdate"] if "S3DestinationUpdate" in data else None
)
update_destination(
stream_name=stream_name,
destination_id=destination_id,
s3_update=s3_update,
version_id=version_id,
)
es_update = (
data["ESDestinationUpdate"] if "ESDestinationUpdate" in data else None
)
update_destination(
stream_name=stream_name,
destination_id=destination_id,
es_update=es_update,
version_id=version_id,
)
response = {}
elif action == "%s.ListTagsForDeliveryStream" % ACTION_HEADER_PREFIX:
response = get_delivery_stream_tags(
data["DeliveryStreamName"],
data.get("ExclusiveStartTagKey"),
data.get("Limit", 50),
)
else:
response = error_response(
'Unknown action "%s"' % action, code=400, error_type="InvalidAction"
)
if isinstance(response, dict):
response = jsonify(response)
return response
|
https://github.com/localstack/localstack/issues/2020
|
2020-02-05T17:05:30:ERROR:flask.app: Exception on / [POST]
Traceback (most recent call last):
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask_cors/extension.py", line 161, in wrapped_function
return cors_after_request(app.make_response(f(*args, **kwargs)))
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/_compat.py", line 35, in reraise
raise value
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/opt/code/localstack/.venv/lib/python3.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/opt/code/localstack/localstack/services/firehose/firehose_api.py", line 276, in post_request
es_update=es_update, version_id=version_id)
TypeError: update_destination() got an unexpected keyword argument 'es_update'
|
TypeError
|
def forward_request(self, method, path, data, headers):
if re.match(PATH_REGEX_USER_REQUEST, path):
search_match = re.search(PATH_REGEX_USER_REQUEST, path)
api_id = search_match.group(1)
stage = search_match.group(2)
relative_path_w_query_params = "/%s" % search_match.group(3)
return invoke_rest_api(
api_id,
stage,
method,
relative_path_w_query_params,
data,
headers,
path=path,
)
data = data and json.loads(to_str(data))
if re.match(PATH_REGEX_AUTHORIZERS, path):
return handle_authorizers(method, path, data, headers)
if re.match(PATH_REGEX_RESPONSES, path):
search_match = re.search(PATH_REGEX_RESPONSES, path)
api_id = search_match.group(1)
if method == "GET":
return get_gateway_responses(api_id)
if method == "PUT":
response_type = search_match.group(2).lstrip("/")
return put_gateway_response(api_id, response_type, data)
return True
|
def forward_request(self, method, path, data, headers):
data = data and json.loads(to_str(data))
if re.match(PATH_REGEX_USER_REQUEST, path):
search_match = re.search(PATH_REGEX_USER_REQUEST, path)
api_id = search_match.group(1)
stage = search_match.group(2)
relative_path_w_query_params = "/%s" % search_match.group(3)
return invoke_rest_api(
api_id,
stage,
method,
relative_path_w_query_params,
data,
headers,
path=path,
)
if re.match(PATH_REGEX_AUTHORIZERS, path):
return handle_authorizers(method, path, data, headers)
if re.match(PATH_REGEX_RESPONSES, path):
search_match = re.search(PATH_REGEX_RESPONSES, path)
api_id = search_match.group(1)
if method == "GET":
return get_gateway_responses(api_id)
if method == "PUT":
response_type = search_match.group(2).lstrip("/")
return put_gateway_response(api_id, response_type, data)
return True
|
https://github.com/localstack/localstack/issues/1743
|
2019-11-08T18:53:53:ERROR:localstack.services.generic_proxy: Exception running proxy on port 8081: [Errno 13] Permission denied: '/tmp/localstack/server.test.pem' Traceback (most recent call last):
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 384, in run_cmd
combined_file, cert_file_name, key_file_name = GenericProxy.create_ssl_cert(serial_number=self.port)
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 401, in create_ssl_cert
return generate_ssl_cert(SERVER_CERT_PEM_FILE, serial_number=serial_number)
File "/opt/code/localstack/localstack/utils/common.py", line 828, in generate_ssl_cert
save_file(target_file, file_content)
File "/opt/code/localstack/localstack/utils/common.py", line 642, in save_file
with open(file, mode) as f:
PermissionError: [Errno 13] Permission denied: '/tmp/localstack/server.test.pem'
|
PermissionError
|
def __init__(self, name, start, check=None, listener=None, priority=0):
self.plugin_name = name
self.start_function = start
self.listener = listener
self.check_function = check
self.priority = priority
|
def __init__(self, name, start, check=None, listener=None):
self.plugin_name = name
self.start_function = start
self.listener = listener
self.check_function = check
|
https://github.com/localstack/localstack/issues/1743
|
2019-11-08T18:53:53:ERROR:localstack.services.generic_proxy: Exception running proxy on port 8081: [Errno 13] Permission denied: '/tmp/localstack/server.test.pem' Traceback (most recent call last):
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 384, in run_cmd
combined_file, cert_file_name, key_file_name = GenericProxy.create_ssl_cert(serial_number=self.port)
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 401, in create_ssl_cert
return generate_ssl_cert(SERVER_CERT_PEM_FILE, serial_number=serial_number)
File "/opt/code/localstack/localstack/utils/common.py", line 828, in generate_ssl_cert
save_file(target_file, file_content)
File "/opt/code/localstack/localstack/utils/common.py", line 642, in save_file
with open(file, mode) as f:
PermissionError: [Errno 13] Permission denied: '/tmp/localstack/server.test.pem'
|
PermissionError
|
def register_plugin(plugin):
existing = SERVICE_PLUGINS.get(plugin.name())
if existing:
if existing.priority > plugin.priority:
return
SERVICE_PLUGINS[plugin.name()] = plugin
|
def register_plugin(plugin):
SERVICE_PLUGINS[plugin.name()] = plugin
|
https://github.com/localstack/localstack/issues/1743
|
2019-11-08T18:53:53:ERROR:localstack.services.generic_proxy: Exception running proxy on port 8081: [Errno 13] Permission denied: '/tmp/localstack/server.test.pem' Traceback (most recent call last):
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 384, in run_cmd
combined_file, cert_file_name, key_file_name = GenericProxy.create_ssl_cert(serial_number=self.port)
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 401, in create_ssl_cert
return generate_ssl_cert(SERVER_CERT_PEM_FILE, serial_number=serial_number)
File "/opt/code/localstack/localstack/utils/common.py", line 828, in generate_ssl_cert
save_file(target_file, file_content)
File "/opt/code/localstack/localstack/utils/common.py", line 642, in save_file
with open(file, mode) as f:
PermissionError: [Errno 13] Permission denied: '/tmp/localstack/server.test.pem'
|
PermissionError
|
def generate_ssl_cert(
target_file=None,
overwrite=False,
random=False,
return_content=False,
serial_number=None,
):
# Note: Do NOT import "OpenSSL" at the root scope
# (Our test Lambdas are importing this file but don't have the module installed)
from OpenSSL import crypto
if target_file and not overwrite and os.path.exists(target_file):
key_file_name = "%s.key" % target_file
cert_file_name = "%s.crt" % target_file
return target_file, cert_file_name, key_file_name
if random and target_file:
if "." in target_file:
target_file = target_file.replace(".", ".%s." % short_uid(), 1)
else:
target_file = "%s.%s" % (target_file, short_uid())
# create a key pair
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, 1024)
# create a self-signed cert
cert = crypto.X509()
subj = cert.get_subject()
subj.C = "AU"
subj.ST = "Some-State"
subj.L = "Some-Locality"
subj.O = "LocalStack Org" # noqa
subj.OU = "Testing"
subj.CN = "localhost"
serial_number = serial_number or 1001
cert.set_serial_number(serial_number)
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)
cert.set_issuer(cert.get_subject())
cert.set_pubkey(k)
cert.sign(k, "sha1")
cert_file = StringIO()
key_file = StringIO()
cert_file.write(to_str(crypto.dump_certificate(crypto.FILETYPE_PEM, cert)))
key_file.write(to_str(crypto.dump_privatekey(crypto.FILETYPE_PEM, k)))
cert_file_content = cert_file.getvalue().strip()
key_file_content = key_file.getvalue().strip()
file_content = "%s\n%s" % (key_file_content, cert_file_content)
if target_file:
key_file_name = "%s.key" % target_file
cert_file_name = "%s.crt" % target_file
# check existence to avoid permission denied issues:
# https://github.com/localstack/localstack/issues/1607
if not os.path.exists(target_file):
for i in range(2):
try:
save_file(target_file, file_content)
save_file(key_file_name, key_file_content)
save_file(cert_file_name, cert_file_content)
break
except Exception as e:
if i > 0:
raise
LOG.info(
"Unable to store certificate file under %s, using tmp file instead: %s"
% (target_file, e)
)
# Fix for https://github.com/localstack/localstack/issues/1743
target_file = "%s.pem" % new_tmp_file()
key_file_name = "%s.key" % target_file
cert_file_name = "%s.crt" % target_file
TMP_FILES.append(target_file)
TMP_FILES.append(key_file_name)
TMP_FILES.append(cert_file_name)
if not return_content:
return target_file, cert_file_name, key_file_name
return file_content
|
def generate_ssl_cert(
target_file=None,
overwrite=False,
random=False,
return_content=False,
serial_number=None,
):
# Note: Do NOT import "OpenSSL" at the root scope
# (Our test Lambdas are importing this file but don't have the module installed)
from OpenSSL import crypto
if target_file and not overwrite and os.path.exists(target_file):
key_file_name = "%s.key" % target_file
cert_file_name = "%s.crt" % target_file
return target_file, cert_file_name, key_file_name
if random and target_file:
if "." in target_file:
target_file = target_file.replace(".", ".%s." % short_uid(), 1)
else:
target_file = "%s.%s" % (target_file, short_uid())
# create a key pair
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, 1024)
# create a self-signed cert
cert = crypto.X509()
subj = cert.get_subject()
subj.C = "AU"
subj.ST = "Some-State"
subj.L = "Some-Locality"
subj.O = "LocalStack Org" # noqa
subj.OU = "Testing"
subj.CN = "localhost"
serial_number = serial_number or 1001
cert.set_serial_number(serial_number)
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)
cert.set_issuer(cert.get_subject())
cert.set_pubkey(k)
cert.sign(k, "sha1")
cert_file = StringIO()
key_file = StringIO()
cert_file.write(to_str(crypto.dump_certificate(crypto.FILETYPE_PEM, cert)))
key_file.write(to_str(crypto.dump_privatekey(crypto.FILETYPE_PEM, k)))
cert_file_content = cert_file.getvalue().strip()
key_file_content = key_file.getvalue().strip()
file_content = "%s\n%s" % (key_file_content, cert_file_content)
if target_file:
key_file_name = "%s.key" % target_file
cert_file_name = "%s.crt" % target_file
# check existence to avoid permission denied issues:
# https://github.com/localstack/localstack/issues/1607
if not os.path.exists(target_file):
save_file(target_file, file_content)
save_file(key_file_name, key_file_content)
save_file(cert_file_name, cert_file_content)
TMP_FILES.append(target_file)
TMP_FILES.append(key_file_name)
TMP_FILES.append(cert_file_name)
if not return_content:
return target_file, cert_file_name, key_file_name
return file_content
|
https://github.com/localstack/localstack/issues/1743
|
2019-11-08T18:53:53:ERROR:localstack.services.generic_proxy: Exception running proxy on port 8081: [Errno 13] Permission denied: '/tmp/localstack/server.test.pem' Traceback (most recent call last):
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 384, in run_cmd
combined_file, cert_file_name, key_file_name = GenericProxy.create_ssl_cert(serial_number=self.port)
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 401, in create_ssl_cert
return generate_ssl_cert(SERVER_CERT_PEM_FILE, serial_number=serial_number)
File "/opt/code/localstack/localstack/utils/common.py", line 828, in generate_ssl_cert
save_file(target_file, file_content)
File "/opt/code/localstack/localstack/utils/common.py", line 642, in save_file
with open(file, mode) as f:
PermissionError: [Errno 13] Permission denied: '/tmp/localstack/server.test.pem'
|
PermissionError
|
def event_type_matches(events, action, api_method):
"""check whether any of the event types in `events` matches the
given `action` and `api_method`, and return the first match."""
events = events or []
for event in events:
regex = event.replace("*", "[^:]*")
action_string = "s3:%s:%s" % (action, api_method)
match = re.match(regex, action_string)
if match:
return match
return False
|
def event_type_matches(events, action, api_method):
"""check whether any of the event types in `events` matches the
given `action` and `api_method`, and return the first match."""
for event in events:
regex = event.replace("*", "[^:]*")
action_string = "s3:%s:%s" % (action, api_method)
match = re.match(regex, action_string)
if match:
return match
return False
|
https://github.com/localstack/localstack/issues/450
|
2017-11-06T16:27:30:ERROR:localstack.services.generic_proxy: Error forwarding request: 'list' object has no attribute 'get' Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/localstack/services/generic_proxy.py", line 161, in forward
path=path, data=data, headers=forward_headers)
File "/usr/local/lib/python3.6/site-packages/localstack/services/s3/s3_listener.py", line 402, in forward_request
events = config.get('Event')
AttributeError: 'list' object has no attribute 'get'
|
AttributeError
|
def send_notifications(method, bucket_name, object_path, version_id):
for bucket, notifs in S3_NOTIFICATIONS.items():
if bucket == bucket_name:
action = {
"PUT": "ObjectCreated",
"POST": "ObjectCreated",
"DELETE": "ObjectRemoved",
}[method]
# TODO: support more detailed methods, e.g., DeleteMarkerCreated
# http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
if action == "ObjectCreated" and method == "POST":
api_method = "CompleteMultipartUpload"
else:
api_method = {"PUT": "Put", "POST": "Post", "DELETE": "Delete"}[method]
event_name = "%s:%s" % (action, api_method)
for notif in notifs:
send_notification_for_subscriber(
notif,
bucket_name,
object_path,
version_id,
api_method,
action,
event_name,
)
|
def send_notifications(method, bucket_name, object_path, version_id):
for bucket, b_cfg in iteritems(S3_NOTIFICATIONS):
if bucket == bucket_name:
action = {
"PUT": "ObjectCreated",
"POST": "ObjectCreated",
"DELETE": "ObjectRemoved",
}[method]
# TODO: support more detailed methods, e.g., DeleteMarkerCreated
# http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
if action == "ObjectCreated" and method == "POST":
api_method = "CompleteMultipartUpload"
else:
api_method = {"PUT": "Put", "POST": "Post", "DELETE": "Delete"}[method]
event_name = "%s:%s" % (action, api_method)
if event_type_matches(
b_cfg["Event"], action, api_method
) and filter_rules_match(b_cfg.get("Filter"), object_path):
# send notification
message = get_event_message(
event_name=event_name,
bucket_name=bucket_name,
file_name=urlparse.urlparse(object_path[1:]).path,
version_id=version_id,
)
message = json.dumps(message)
if b_cfg.get("Queue"):
sqs_client = aws_stack.connect_to_service("sqs")
try:
queue_url = queue_url_for_arn(b_cfg["Queue"])
sqs_client.send_message(QueueUrl=queue_url, MessageBody=message)
except Exception as e:
LOGGER.warning(
'Unable to send notification for S3 bucket "%s" to SQS queue "%s": %s'
% (bucket_name, b_cfg["Queue"], e)
)
if b_cfg.get("Topic"):
sns_client = aws_stack.connect_to_service("sns")
try:
sns_client.publish(
TopicArn=b_cfg["Topic"],
Message=message,
Subject="Amazon S3 Notification",
)
except Exception:
LOGGER.warning(
'Unable to send notification for S3 bucket "%s" to SNS topic "%s".'
% (bucket_name, b_cfg["Topic"])
)
# CloudFunction and LambdaFunction are semantically identical
lambda_function_config = b_cfg.get("CloudFunction") or b_cfg.get(
"LambdaFunction"
)
if lambda_function_config:
# make sure we don't run into a socket timeout
connection_config = botocore.config.Config(read_timeout=300)
lambda_client = aws_stack.connect_to_service(
"lambda", config=connection_config
)
try:
lambda_client.invoke(
FunctionName=lambda_function_config,
InvocationType="Event",
Payload=message,
)
except Exception:
LOGGER.warning(
'Unable to send notification for S3 bucket "%s" to Lambda function "%s".'
% (bucket_name, lambda_function_config)
)
if not filter(lambda x: b_cfg.get(x), NOTIFICATION_DESTINATION_TYPES):
LOGGER.warning(
"Neither of %s defined for S3 notification."
% "/".join(NOTIFICATION_DESTINATION_TYPES)
)
|
https://github.com/localstack/localstack/issues/450
|
2017-11-06T16:27:30:ERROR:localstack.services.generic_proxy: Error forwarding request: 'list' object has no attribute 'get' Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/localstack/services/generic_proxy.py", line 161, in forward
path=path, data=data, headers=forward_headers)
File "/usr/local/lib/python3.6/site-packages/localstack/services/s3/s3_listener.py", line 402, in forward_request
events = config.get('Event')
AttributeError: 'list' object has no attribute 'get'
|
AttributeError
|
def handle_notification_request(bucket, method, data):
response = Response()
response.status_code = 200
response._content = ""
if method == "GET":
# TODO check if bucket exists
result = '<NotificationConfiguration xmlns="%s">' % XMLNS_S3
if bucket in S3_NOTIFICATIONS:
notifs = S3_NOTIFICATIONS[bucket]
for notif in notifs:
for dest in NOTIFICATION_DESTINATION_TYPES:
if dest in notif:
dest_dict = {
"%sConfiguration" % dest: {
"Id": uuid.uuid4(),
dest: notif[dest],
"Event": notif["Event"],
"Filter": notif["Filter"],
}
}
result += xmltodict.unparse(dest_dict, full_document=False)
result += "</NotificationConfiguration>"
response._content = result
if method == "PUT":
parsed = xmltodict.parse(data)
notif_config = parsed.get("NotificationConfiguration")
S3_NOTIFICATIONS[bucket] = []
for dest in NOTIFICATION_DESTINATION_TYPES:
config = notif_config.get("%sConfiguration" % (dest))
configs = config if isinstance(config, list) else [config] if config else []
for config in configs:
events = config.get("Event")
if isinstance(events, six.string_types):
events = [events]
event_filter = config.get("Filter", {})
# make sure FilterRule is an array
s3_filter = _get_s3_filter(event_filter)
if s3_filter and not isinstance(s3_filter.get("FilterRule", []), list):
s3_filter["FilterRule"] = [s3_filter["FilterRule"]]
# create final details dict
notification_details = {
"Id": config.get("Id"),
"Event": events,
dest: config.get(dest),
"Filter": event_filter,
}
S3_NOTIFICATIONS[bucket].append(clone(notification_details))
return response
|
def handle_notification_request(bucket, method, data):
response = Response()
response.status_code = 200
response._content = ""
if method == "GET":
# TODO check if bucket exists
result = '<NotificationConfiguration xmlns="%s">' % XMLNS_S3
if bucket in S3_NOTIFICATIONS:
notif = S3_NOTIFICATIONS[bucket]
for dest in NOTIFICATION_DESTINATION_TYPES:
if dest in notif:
dest_dict = {
"%sConfiguration" % dest: {
"Id": uuid.uuid4(),
dest: notif[dest],
"Event": notif["Event"],
"Filter": notif["Filter"],
}
}
result += xmltodict.unparse(dest_dict, full_document=False)
result += "</NotificationConfiguration>"
response._content = result
if method == "PUT":
parsed = xmltodict.parse(data)
notif_config = parsed.get("NotificationConfiguration")
S3_NOTIFICATIONS.pop(bucket, None)
for dest in NOTIFICATION_DESTINATION_TYPES:
config = notif_config.get("%sConfiguration" % (dest))
if config:
events = config.get("Event")
if isinstance(events, six.string_types):
events = [events]
event_filter = config.get("Filter", {})
# make sure FilterRule is an array
s3_filter = _get_s3_filter(event_filter)
if s3_filter and not isinstance(s3_filter.get("FilterRule", []), list):
s3_filter["FilterRule"] = [s3_filter["FilterRule"]]
# create final details dict
notification_details = {
"Id": config.get("Id"),
"Event": events,
dest: config.get(dest),
"Filter": event_filter,
}
# TODO: what if we have multiple destinations - would we overwrite the config?
S3_NOTIFICATIONS[bucket] = clone(notification_details)
return response
|
https://github.com/localstack/localstack/issues/450
|
2017-11-06T16:27:30:ERROR:localstack.services.generic_proxy: Error forwarding request: 'list' object has no attribute 'get' Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/localstack/services/generic_proxy.py", line 161, in forward
path=path, data=data, headers=forward_headers)
File "/usr/local/lib/python3.6/site-packages/localstack/services/s3/s3_listener.py", line 402, in forward_request
events = config.get('Event')
AttributeError: 'list' object has no attribute 'get'
|
AttributeError
|
def _store_logs(self, func_details, log_output, invocation_time):
if not aws_stack.is_service_enabled("logs"):
return
logs_client = aws_stack.connect_to_service("logs")
log_group_name = "/aws/lambda/%s" % func_details.name()
time_str = time.strftime("%Y/%m/%d", time.gmtime(invocation_time))
log_stream_name = "%s/[$LATEST]%s" % (time_str, short_uid())
# make sure that the log group exists
log_groups = logs_client.describe_log_groups()["logGroups"]
log_groups = [lg["logGroupName"] for lg in log_groups]
if log_group_name not in log_groups:
try:
logs_client.create_log_group(logGroupName=log_group_name)
except botocore.errorfactory.ResourceAlreadyExistsException:
# this can happen in certain cases, possibly due to a race condition
pass
# create a new log stream for this lambda invocation
logs_client.create_log_stream(
logGroupName=log_group_name, logStreamName=log_stream_name
)
# store new log events under the log stream
invocation_time = invocation_time
finish_time = int(time.time() * 1000)
log_lines = log_output.split("\n")
time_diff_per_line = float(finish_time - invocation_time) / float(len(log_lines))
log_events = []
for i, line in enumerate(log_lines):
if not line:
continue
# simple heuristic: assume log lines were emitted in regular intervals
log_time = invocation_time + float(i) * time_diff_per_line
event = {"timestamp": int(log_time), "message": line}
log_events.append(event)
if not log_events:
return
logs_client.put_log_events(
logGroupName=log_group_name, logStreamName=log_stream_name, logEvents=log_events
)
|
def _store_logs(self, func_details, log_output, invocation_time):
if not aws_stack.is_service_enabled("logs"):
return
logs_client = aws_stack.connect_to_service("logs")
log_group_name = "/aws/lambda/%s" % func_details.name()
time_str = time.strftime("%Y/%m/%d", time.gmtime(invocation_time))
log_stream_name = "%s/[$LATEST]%s" % (time_str, short_uid())
# make sure that the log group exists
log_groups = logs_client.describe_log_groups()["logGroups"]
log_groups = [lg["logGroupName"] for lg in log_groups]
if log_group_name not in log_groups:
logs_client.create_log_group(logGroupName=log_group_name)
# create a new log stream for this lambda invocation
logs_client.create_log_stream(
logGroupName=log_group_name, logStreamName=log_stream_name
)
# store new log events under the log stream
invocation_time = invocation_time
finish_time = int(time.time() * 1000)
log_lines = log_output.split("\n")
time_diff_per_line = float(finish_time - invocation_time) / float(len(log_lines))
log_events = []
for i, line in enumerate(log_lines):
if not line:
continue
# simple heuristic: assume log lines were emitted in regular intervals
log_time = invocation_time + float(i) * time_diff_per_line
event = {"timestamp": int(log_time), "message": line}
log_events.append(event)
if not log_events:
return
logs_client.put_log_events(
logGroupName=log_group_name, logStreamName=log_stream_name, logEvents=log_events
)
|
https://github.com/localstack/localstack/issues/1642
|
{"Type": "Server", "message": "Error executing Lambda function arn:aws:lambda:ap-northeast-1:000000000000:function:service-stage-functionName: An error occurred (ResourceAlreadyExistsException) when calling the CreateLogGroup operation: The specified log group already exists Traceback (most recent call last):\n
File \"/opt/code/localstack/localstack/services/awslambda/lambda_api.py\", line 338, in run_lambda\n
event, context=context, version=version, asynchronous=asynchronous)\n
File \"/opt/code/localstack/localstack/services/awslambda/lambda_executors.py\", line 81, in execute\n
return do_execute()\n File \"/opt/code/localstack/localstack/services/awslambda/lambda_executors.py\", line 70, in do_execute\n self._store_logs(func_details, log_output, invocation_time)\n
File \"/opt/code/localstack/localstack/services/awslambda/lambda_executors.py\", line 105, in _store_logs\n logs_client.create_log_group(logGroupName=log_group_name)\n
File \"/opt/code/localstack/.venv/lib/python3.6/site-packages/botocore/client.py\", line 357, in _api_call\n
return self._make_api_call(operation_name, kwargs)\n
File \"/opt/code/localstack/.venv/lib/python3.6/site-packages/botocore/client.py\", line 661, in _make_api_call\n
raise error_class(parsed_response, operation_name)\n
botocore.errorfactory.ResourceAlreadyExistsException: An error occurred (ResourceAlreadyExistsException) when calling the CreateLogGroup operation: The specified log group already exists\n", "__type": "InternalFailure"}
|
botocore.errorfactory.ResourceAlreadyExistsException
|
def apply_patches():
"""Apply patches to make LocalStack seamlessly interact with the moto backend.
TODO: Eventually, these patches should be contributed to the upstream repo!"""
# Patch S3Backend.get_key method in moto to use S3 API from LocalStack
def get_key(self, bucket_name, key_name, version_id=None):
s3_client = aws_stack.connect_to_service("s3")
value = b""
if bucket_name != BUCKET_MARKER_LOCAL:
value = s3_client.get_object(Bucket=bucket_name, Key=key_name)[
"Body"
].read()
return s3_models.FakeKey(name=key_name, value=value)
s3_models.S3Backend.get_key = get_key
# Patch clean_json in moto
def clean_json(resource_json, resources_map):
result = clean_json_orig(resource_json, resources_map)
if isinstance(result, BaseModel):
if isinstance(resource_json, dict) and "Ref" in resource_json:
entity_id = get_entity_id(result, resource_json)
if entity_id:
return entity_id
LOG.warning(
'Unable to resolve "Ref" attribute for: %s - %s - %s',
resource_json,
result,
type(result),
)
return result
clean_json_orig = parsing.clean_json
parsing.clean_json = clean_json
# add model mappings to moto
parsing.MODEL_MAP.update(MODEL_MAP)
# Patch parse_and_create_resource method in moto to deploy resources in LocalStack
def parse_and_create_resource(
logical_id, resource_json, resources_map, region_name
):
try:
return _parse_and_create_resource(
logical_id, resource_json, resources_map, region_name
)
except Exception as e:
LOG.error(
'Unable to parse and create resource "%s": %s %s'
% (logical_id, e, traceback.format_exc())
)
raise
def parse_and_update_resource(
logical_id, resource_json, resources_map, region_name
):
try:
return _parse_and_create_resource(
logical_id, resource_json, resources_map, region_name, update=True
)
except Exception as e:
LOG.error(
'Unable to parse and update resource "%s": %s %s'
% (logical_id, e, traceback.format_exc())
)
raise
def _parse_and_create_resource(
logical_id, resource_json, resources_map, region_name, update=False
):
stack_name = resources_map.get("AWS::StackName")
resource_hash_key = (stack_name, logical_id)
# If the current stack is being updated, avoid infinite recursion
updating = CURRENTLY_UPDATING_RESOURCES.get(resource_hash_key)
LOG.debug(
"Currently updating stack resource %s/%s: %s"
% (stack_name, logical_id, updating)
)
if updating:
return None
# parse and get final resource JSON
resource_tuple = parsing.parse_resource(
logical_id, resource_json, resources_map
)
if not resource_tuple:
return None
_, resource_json, _ = resource_tuple
# add some missing default props which otherwise cause deployments to fail
props = resource_json["Properties"] = resource_json.get("Properties") or {}
if resource_json["Type"] == "AWS::Lambda::EventSourceMapping" and not props.get(
"StartingPosition"
):
props["StartingPosition"] = "LATEST"
# check if this resource already exists in the resource map
resource = resources_map._parsed_resources.get(logical_id)
if resource and not update:
return resource
# check whether this resource needs to be deployed
resource_wrapped = {logical_id: resource_json}
should_be_created = template_deployer.should_be_deployed(
logical_id, resource_wrapped, stack_name
)
if not should_be_created:
# This resource is either not deployable or already exists. Check if it can be updated
if not template_deployer.is_updateable(
logical_id, resource_wrapped, stack_name
):
LOG.debug(
"Resource %s need not be deployed: %s" % (logical_id, resource_json)
)
if resource:
return resource
# fix resource ARNs, make sure to convert account IDs 000000000000 to 123456789012
resource_json_arns_fixed = clone(json_safe(convert_objs_to_ids(resource_json)))
set_moto_account_ids(resource_json_arns_fixed)
# create resource definition and store CloudFormation metadata in moto
if resource or update:
parse_and_update_resource_orig(
logical_id, resource_json_arns_fixed, resources_map, region_name
)
elif not resource:
resource = parse_and_create_resource_orig(
logical_id, resource_json_arns_fixed, resources_map, region_name
)
# Fix for moto which sometimes hard-codes region name as 'us-east-1'
if hasattr(resource, "region_name") and resource.region_name != region_name:
LOG.debug(
"Updating incorrect region from %s to %s"
% (resource.region_name, region_name)
)
resource.region_name = region_name
# Apply some fixes/patches to the resource names, then deploy resource in LocalStack
update_resource_name(resource, resource_json)
LOG.debug("Deploying CloudFormation resource: %s" % resource_json)
try:
CURRENTLY_UPDATING_RESOURCES[resource_hash_key] = True
deploy_func = (
template_deployer.update_resource
if update
else template_deployer.deploy_resource
)
result = deploy_func(logical_id, resource_wrapped, stack_name=stack_name)
finally:
CURRENTLY_UPDATING_RESOURCES[resource_hash_key] = False
if not should_be_created:
# skip the parts below for update requests
return resource
def find_id(resource):
"""Find ID of the given resource."""
for id_attr in ("Id", "id", "ResourceId", "RestApiId", "DeploymentId"):
if id_attr in resource:
return resource[id_attr]
# update resource IDs to avoid mismatch between CF moto and LocalStack backend resources
if hasattr(resource, "id") or (
isinstance(resource, dict) and resource.get("id")
):
existing_id = resource.id if hasattr(resource, "id") else resource["id"]
new_res_id = find_id(result)
LOG.debug(
"Updating resource id: %s - %s, %s - %s"
% (existing_id, new_res_id, resource, resource_json)
)
if new_res_id:
LOG.info(
"Updating resource ID from %s to %s (%s)"
% (existing_id, new_res_id, region_name)
)
update_resource_id(resource, new_res_id, props, region_name)
else:
LOG.warning(
"Unable to extract id for resource %s: %s" % (logical_id, result)
)
# update physical_resource_id field
update_physical_resource_id(resource)
return resource
def update_resource_name(resource, resource_json):
"""Some resources require minor fixes in their CF resource definition
before we can pass them on to deployment."""
props = resource_json["Properties"] = resource_json.get("Properties") or {}
if isinstance(resource, sfn_models.StateMachine) and not props.get(
"StateMachineName"
):
props["StateMachineName"] = resource.name
def update_resource_id(resource, new_id, props, region_name):
"""Update and fix the ID(s) of the given resource."""
# NOTE: this is a bit of a hack, which is required because
# of the order of events when CloudFormation resources are created.
# When we process a request to create a CF resource that's part of a
# stack, say, an API Gateway Resource, then we (1) create the object
# in memory in moto, which generates a random ID for the resource, and
# (2) create the actual resource in the backend service using
# template_deployer.deploy_resource(..) (see above).
# The resource created in (2) now has a different ID than the resource
# created in (1), which leads to downstream problems. Hence, we need
# the logic below to reconcile the ids, i.e., apply IDs from (2) to (1).
backend = apigw_models.apigateway_backends[region_name]
if isinstance(resource, apigw_models.RestAPI):
backend.apis.pop(resource.id, None)
backend.apis[new_id] = resource
# We also need to fetch the resources to replace the root resource
# that moto automatically adds to newly created RestAPI objects
client = aws_stack.connect_to_service("apigateway")
resources = client.get_resources(restApiId=new_id, limit=500)["items"]
# make sure no resources have been added in addition to the root /
assert len(resource.resources) == 1
resource.resources = {}
for res in resources:
res_path_part = res.get("pathPart") or res.get("path")
child = resource.add_child(res_path_part, res.get("parentId"))
resource.resources.pop(child.id)
child.id = res["id"]
child.api_id = new_id
resource.resources[child.id] = child
resource.id = new_id
elif isinstance(resource, apigw_models.Resource):
api_id = props["RestApiId"]
backend.apis[api_id].resources.pop(resource.id, None)
backend.apis[api_id].resources[new_id] = resource
resource.id = new_id
elif isinstance(resource, apigw_models.Deployment):
api_id = props["RestApiId"]
backend.apis[api_id].deployments.pop(resource["id"], None)
backend.apis[api_id].deployments[new_id] = resource
resource["id"] = new_id
else:
LOG.warning(
"Unexpected resource type when updating ID: %s" % type(resource)
)
def update_physical_resource_id(resource):
phys_res_id = getattr(resource, "physical_resource_id", None)
if not phys_res_id:
if isinstance(resource, lambda_models.LambdaFunction):
func_arn = aws_stack.lambda_function_arn(resource.function_name)
resource.function_arn = resource.physical_resource_id = func_arn
elif isinstance(resource, sfn_models.StateMachine):
sm_arn = aws_stack.state_machine_arn(resource.name)
resource.physical_resource_id = sm_arn
elif isinstance(resource, service_models.StepFunctionsActivity):
act_arn = aws_stack.stepfunctions_activity_arn(
resource.params.get("Name")
)
resource.physical_resource_id = act_arn
else:
LOG.warning(
"Unable to determine physical_resource_id for resource %s"
% type(resource)
)
parse_and_create_resource_orig = parsing.parse_and_create_resource
parsing.parse_and_create_resource = parse_and_create_resource
parse_and_update_resource_orig = parsing.parse_and_update_resource
parsing.parse_and_update_resource = parse_and_update_resource
# Patch CloudFormation parse_output(..) method to fix a bug in moto
def parse_output(output_logical_id, output_json, resources_map):
try:
return parse_output_orig(output_logical_id, output_json, resources_map)
except KeyError:
output = Output()
output.key = output_logical_id
output.value = None
output.description = output_json.get("Description")
return output
parse_output_orig = parsing.parse_output
parsing.parse_output = parse_output
# Patch DynamoDB get_cfn_attribute(..) method in moto
def DynamoDB_Table_get_cfn_attribute(self, attribute_name):
try:
return DynamoDB_Table_get_cfn_attribute_orig(self, attribute_name)
except Exception:
if attribute_name == "Arn":
return aws_stack.dynamodb_table_arn(table_name=self.name)
raise
DynamoDB_Table_get_cfn_attribute_orig = dynamodb_models.Table.get_cfn_attribute
dynamodb_models.Table.get_cfn_attribute = DynamoDB_Table_get_cfn_attribute
# Patch DynamoDB get_cfn_attribute(..) method in moto
def DynamoDB2_Table_get_cfn_attribute(self, attribute_name):
if attribute_name == "Arn":
return aws_stack.dynamodb_table_arn(table_name=self.name)
elif attribute_name == "StreamArn":
if (self.stream_specification or {}).get("StreamEnabled"):
return aws_stack.dynamodb_stream_arn(self.name, "latest")
return None
raise UnformattedGetAttTemplateException()
dynamodb2_models.Table.get_cfn_attribute = DynamoDB2_Table_get_cfn_attribute
# Patch SQS get_cfn_attribute(..) method in moto
def SQS_Queue_get_cfn_attribute(self, attribute_name):
if attribute_name == "Arn":
return aws_stack.sqs_queue_arn(queue_name=self.name)
return SQS_Queue_get_cfn_attribute_orig(self, attribute_name)
SQS_Queue_get_cfn_attribute_orig = sqs_models.Queue.get_cfn_attribute
sqs_models.Queue.get_cfn_attribute = SQS_Queue_get_cfn_attribute
# Patch SQS physical_resource_id(..) method in moto
@property
def SQS_Queue_physical_resource_id(self):
result = SQS_Queue_physical_resource_id_orig.fget(self)
if "://" not in result:
# convert ID to queue URL
return aws_stack.get_sqs_queue_url(result)
return result
SQS_Queue_physical_resource_id_orig = sqs_models.Queue.physical_resource_id
sqs_models.Queue.physical_resource_id = SQS_Queue_physical_resource_id
# Patch Lambda get_cfn_attribute(..) method in moto
def Lambda_Function_get_cfn_attribute(self, attribute_name):
try:
if attribute_name == "Arn":
return self.function_arn
return Lambda_Function_get_cfn_attribute_orig(self, attribute_name)
except Exception:
if attribute_name in ("Name", "FunctionName"):
return self.function_name
raise
Lambda_Function_get_cfn_attribute_orig = (
lambda_models.LambdaFunction.get_cfn_attribute
)
lambda_models.LambdaFunction.get_cfn_attribute = Lambda_Function_get_cfn_attribute
# Patch DynamoDB get_cfn_attribute(..) method in moto
def DynamoDB_Table_get_cfn_attribute(self, attribute_name):
try:
if attribute_name == "StreamArn":
streams = aws_stack.connect_to_service("dynamodbstreams").list_streams(
TableName=self.name
)["Streams"]
return streams[0]["StreamArn"] if streams else None
return DynamoDB_Table_get_cfn_attribute_orig(self, attribute_name)
except Exception as e:
LOG.warning(
'Unable to get attribute "%s" from resource %s: %s'
% (attribute_name, type(self), e)
)
raise
DynamoDB_Table_get_cfn_attribute_orig = dynamodb_models.Table.get_cfn_attribute
dynamodb_models.Table.get_cfn_attribute = DynamoDB_Table_get_cfn_attribute
# Patch IAM get_cfn_attribute(..) method in moto
def IAM_Role_get_cfn_attribute(self, attribute_name):
try:
return IAM_Role_get_cfn_attribute_orig(self, attribute_name)
except Exception:
if attribute_name == "Arn":
return aws_stack.role_arn(self.name)
raise
IAM_Role_get_cfn_attribute_orig = iam_models.Role.get_cfn_attribute
iam_models.Role.get_cfn_attribute = IAM_Role_get_cfn_attribute
# Patch LambdaFunction create_from_cloudformation_json(..) method in moto
@classmethod
def Lambda_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
resource_name = (
cloudformation_json.get("Properties", {}).get("FunctionName")
or resource_name
)
return Lambda_create_from_cloudformation_json_orig(
resource_name, cloudformation_json, region_name
)
Lambda_create_from_cloudformation_json_orig = (
lambda_models.LambdaFunction.create_from_cloudformation_json
)
lambda_models.LambdaFunction.create_from_cloudformation_json = (
Lambda_create_from_cloudformation_json
)
# Patch LambdaFunction update_from_cloudformation_json(..) method in moto
@classmethod
def Lambda_update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
resource_name = (
cloudformation_json.get("Properties", {}).get("FunctionName")
or new_resource_name
)
return Lambda_create_from_cloudformation_json_orig(
resource_name, cloudformation_json, region_name
)
if not hasattr(lambda_models.LambdaFunction, "update_from_cloudformation_json"):
lambda_models.LambdaFunction.update_from_cloudformation_json = (
Lambda_update_from_cloudformation_json
)
# patch ApiGateway Deployment
def depl_delete_from_cloudformation_json(resource_name, resource_json, region_name):
properties = resource_json["Properties"]
LOG.info(
"TODO: apigateway.Deployment.delete_from_cloudformation_json %s"
% properties
)
if not hasattr(apigw_models.Deployment, "delete_from_cloudformation_json"):
apigw_models.Deployment.delete_from_cloudformation_json = (
depl_delete_from_cloudformation_json
)
# patch Lambda Version
def vers_delete_from_cloudformation_json(resource_name, resource_json, region_name):
properties = resource_json["Properties"]
LOG.info(
"TODO: apigateway.Deployment.delete_from_cloudformation_json %s"
% properties
)
if not hasattr(lambda_models.LambdaVersion, "delete_from_cloudformation_json"):
lambda_models.LambdaVersion.delete_from_cloudformation_json = (
vers_delete_from_cloudformation_json
)
# add CloudWatch types
parsing.MODEL_MAP["AWS::ApiGateway::Deployment"] = apigw_models.Deployment
parsing.MODEL_MAP["AWS::ApiGateway::Method"] = apigw_models.Method
parsing.MODEL_MAP["AWS::ApiGateway::Resource"] = apigw_models.Resource
parsing.MODEL_MAP["AWS::ApiGateway::RestApi"] = apigw_models.RestAPI
parsing.MODEL_MAP["AWS::StepFunctions::StateMachine"] = sfn_models.StateMachine
@classmethod
def RestAPI_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
name = props["Name"]
region_name = props.get("Region") or aws_stack.get_region()
description = props.get("Description") or ""
id = props.get("Id") or short_uid()
return apigw_models.RestAPI(id, region_name, name, description)
def RestAPI_get_cfn_attribute(self, attribute_name):
if attribute_name == "Id":
return self.id
if attribute_name == "Region":
return self.region_name
if attribute_name == "Name":
return self.name
if attribute_name == "Description":
return self.description
if attribute_name == "RootResourceId":
for id, resource in self.resources.items():
if resource.parent_id is None:
return resource.id
return None
raise UnformattedGetAttTemplateException()
@classmethod
def Deployment_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
name = props["StageName"]
deployment_id = props.get("Id") or short_uid()
description = props.get("Description") or ""
return apigw_models.Deployment(deployment_id, name, description)
@classmethod
def Resource_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
region_name = props.get("Region") or aws_stack.get_region()
path_part = props.get("PathPart")
api_id = props.get("RestApiId")
parent_id = props.get("ParentId")
id = props.get("Id") or short_uid()
return apigw_models.Resource(id, region_name, api_id, path_part, parent_id)
@classmethod
def Method_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
method_type = props.get("HttpMethod")
authorization_type = props.get("AuthorizationType")
return apigw_models.Method(method_type, authorization_type)
apigw_models.RestAPI.create_from_cloudformation_json = (
RestAPI_create_from_cloudformation_json
)
apigw_models.RestAPI.get_cfn_attribute = RestAPI_get_cfn_attribute
apigw_models.Deployment.create_from_cloudformation_json = (
Deployment_create_from_cloudformation_json
)
apigw_models.Resource.create_from_cloudformation_json = (
Resource_create_from_cloudformation_json
)
apigw_models.Method.create_from_cloudformation_json = (
Method_create_from_cloudformation_json
)
# TODO: add support for AWS::ApiGateway::Model, AWS::ApiGateway::RequestValidator, ...
# fix AttributeError in moto's CloudFormation describe_stack_resource
def describe_stack_resource(self):
stack_name = self._get_param("StackName")
stack = self.cloudformation_backend.get_stack(stack_name)
logical_resource_id = self._get_param("LogicalResourceId")
for stack_resource in stack.stack_resources:
# Note: Line below has been patched
# if stack_resource.logical_resource_id == logical_resource_id:
if (
stack_resource
and stack_resource.logical_resource_id == logical_resource_id
):
resource = stack_resource
break
else:
raise ValidationError(logical_resource_id)
template = self.response_template(
responses.DESCRIBE_STACK_RESOURCE_RESPONSE_TEMPLATE
)
return template.render(stack=stack, resource=resource)
responses.CloudFormationResponse.describe_stack_resource = describe_stack_resource
|
def apply_patches():
"""Apply patches to make LocalStack seamlessly interact with the moto backend.
TODO: Eventually, these patches should be contributed to the upstream repo!"""
# Patch S3Backend.get_key method in moto to use S3 API from LocalStack
def get_key(self, bucket_name, key_name, version_id=None):
s3_client = aws_stack.connect_to_service("s3")
value = b""
if bucket_name != BUCKET_MARKER_LOCAL:
value = s3_client.get_object(Bucket=bucket_name, Key=key_name)[
"Body"
].read()
return s3_models.FakeKey(name=key_name, value=value)
s3_models.S3Backend.get_key = get_key
# Patch clean_json in moto
def clean_json(resource_json, resources_map):
result = clean_json_orig(resource_json, resources_map)
if isinstance(result, BaseModel):
if isinstance(resource_json, dict) and "Ref" in resource_json:
entity_id = get_entity_id(result, resource_json)
if entity_id:
return entity_id
LOG.warning(
'Unable to resolve "Ref" attribute for: %s - %s - %s',
resource_json,
result,
type(result),
)
return result
clean_json_orig = parsing.clean_json
parsing.clean_json = clean_json
# add model mappings to moto
parsing.MODEL_MAP.update(MODEL_MAP)
# Patch parse_and_create_resource method in moto to deploy resources in LocalStack
def parse_and_create_resource(
logical_id, resource_json, resources_map, region_name
):
try:
return _parse_and_create_resource(
logical_id, resource_json, resources_map, region_name
)
except Exception as e:
LOG.error(
'Unable to parse and create resource "%s": %s %s'
% (logical_id, e, traceback.format_exc())
)
raise
def _parse_and_create_resource(
logical_id, resource_json, resources_map, region_name
):
stack_name = resources_map.get("AWS::StackName")
resource_hash_key = (stack_name, logical_id)
# If the current stack is being updated, avoid infinite recursion
updating = CURRENTLY_UPDATING_RESOURCES.get(resource_hash_key)
LOG.debug(
"Currently updating stack resource %s/%s: %s"
% (stack_name, logical_id, updating)
)
if updating:
return None
# parse and get final resource JSON
resource_tuple = parsing.parse_resource(
logical_id, resource_json, resources_map
)
if not resource_tuple:
return None
_, resource_json, _ = resource_tuple
# add some missing default props which otherwise cause deployments to fail
props = resource_json["Properties"] = resource_json.get("Properties") or {}
if resource_json["Type"] == "AWS::Lambda::EventSourceMapping" and not props.get(
"StartingPosition"
):
props["StartingPosition"] = "LATEST"
# check if this resource already exists in the resource map
resource = resources_map._parsed_resources.get(logical_id)
# check whether this resource needs to be deployed
resource_wrapped = {logical_id: resource_json}
should_be_created = template_deployer.should_be_deployed(
logical_id, resource_wrapped, stack_name
)
if not should_be_created:
# This resource is either not deployable or already exists. Check if it can be updated
if not template_deployer.is_updateable(
logical_id, resource_wrapped, stack_name
):
LOG.debug(
"Resource %s need not be deployed: %s" % (logical_id, resource_json)
)
if resource:
return resource
if not resource:
# fix resource ARNs, make sure to convert account IDs 000000000000 to 123456789012
resource_json_arns_fixed = clone(
json_safe(convert_objs_to_ids(resource_json))
)
set_moto_account_ids(resource_json_arns_fixed)
# create resource definition and store CloudFormation metadata in moto
resource = parse_and_create_resource_orig(
logical_id, resource_json_arns_fixed, resources_map, region_name
)
# Fix for moto which sometimes hard-codes region name as 'us-east-1'
if hasattr(resource, "region_name") and resource.region_name != region_name:
LOG.debug(
"Updating incorrect region from %s to %s"
% (resource.region_name, region_name)
)
resource.region_name = region_name
# Apply some fixes/patches to the resource names, then deploy resource in LocalStack
update_resource_name(resource, resource_json)
LOG.debug("Deploying CloudFormation resource: %s" % resource_json)
try:
CURRENTLY_UPDATING_RESOURCES[resource_hash_key] = True
deploy_func = (
template_deployer.deploy_resource
if should_be_created
else template_deployer.update_resource
)
result = deploy_func(logical_id, resource_wrapped, stack_name=stack_name)
finally:
CURRENTLY_UPDATING_RESOURCES[resource_hash_key] = False
if not should_be_created:
# skip the parts below for update requests
return resource
def find_id(resource):
"""Find ID of the given resource."""
for id_attr in ("Id", "id", "ResourceId", "RestApiId", "DeploymentId"):
if id_attr in resource:
return resource[id_attr]
# update resource IDs to avoid mismatch between CF moto and LocalStack backend resources
if hasattr(resource, "id") or (
isinstance(resource, dict) and resource.get("id")
):
existing_id = resource.id if hasattr(resource, "id") else resource["id"]
new_res_id = find_id(result)
LOG.debug(
"Updating resource id: %s - %s, %s - %s"
% (existing_id, new_res_id, resource, resource_json)
)
if new_res_id:
LOG.info(
"Updating resource ID from %s to %s (%s)"
% (existing_id, new_res_id, region_name)
)
update_resource_id(resource, new_res_id, props, region_name)
else:
LOG.warning(
"Unable to extract id for resource %s: %s" % (logical_id, result)
)
# update physical_resource_id field
update_physical_resource_id(resource)
return resource
def update_resource_name(resource, resource_json):
"""Some resources require minor fixes in their CF resource definition
before we can pass them on to deployment."""
props = resource_json["Properties"] = resource_json.get("Properties") or {}
if isinstance(resource, sfn_models.StateMachine) and not props.get(
"StateMachineName"
):
props["StateMachineName"] = resource.name
def update_resource_id(resource, new_id, props, region_name):
"""Update and fix the ID(s) of the given resource."""
# NOTE: this is a bit of a hack, which is required because
# of the order of events when CloudFormation resources are created.
# When we process a request to create a CF resource that's part of a
# stack, say, an API Gateway Resource, then we (1) create the object
# in memory in moto, which generates a random ID for the resource, and
# (2) create the actual resource in the backend service using
# template_deployer.deploy_resource(..) (see above).
# The resource created in (2) now has a different ID than the resource
# created in (1), which leads to downstream problems. Hence, we need
# the logic below to reconcile the ids, i.e., apply IDs from (2) to (1).
backend = apigw_models.apigateway_backends[region_name]
if isinstance(resource, apigw_models.RestAPI):
backend.apis.pop(resource.id, None)
backend.apis[new_id] = resource
# We also need to fetch the resources to replace the root resource
# that moto automatically adds to newly created RestAPI objects
client = aws_stack.connect_to_service("apigateway")
resources = client.get_resources(restApiId=new_id, limit=500)["items"]
# make sure no resources have been added in addition to the root /
assert len(resource.resources) == 1
resource.resources = {}
for res in resources:
res_path_part = res.get("pathPart") or res.get("path")
child = resource.add_child(res_path_part, res.get("parentId"))
resource.resources.pop(child.id)
child.id = res["id"]
child.api_id = new_id
resource.resources[child.id] = child
resource.id = new_id
elif isinstance(resource, apigw_models.Resource):
api_id = props["RestApiId"]
backend.apis[api_id].resources.pop(resource.id, None)
backend.apis[api_id].resources[new_id] = resource
resource.id = new_id
elif isinstance(resource, apigw_models.Deployment):
api_id = props["RestApiId"]
backend.apis[api_id].deployments.pop(resource["id"], None)
backend.apis[api_id].deployments[new_id] = resource
resource["id"] = new_id
else:
LOG.warning(
"Unexpected resource type when updating ID: %s" % type(resource)
)
def update_physical_resource_id(resource):
phys_res_id = getattr(resource, "physical_resource_id", None)
if not phys_res_id:
if isinstance(resource, lambda_models.LambdaFunction):
func_arn = aws_stack.lambda_function_arn(resource.function_name)
resource.function_arn = resource.physical_resource_id = func_arn
elif isinstance(resource, sfn_models.StateMachine):
sm_arn = aws_stack.state_machine_arn(resource.name)
resource.physical_resource_id = sm_arn
elif isinstance(resource, service_models.StepFunctionsActivity):
act_arn = aws_stack.stepfunctions_activity_arn(
resource.params.get("Name")
)
resource.physical_resource_id = act_arn
else:
LOG.warning(
"Unable to determine physical_resource_id for resource %s"
% type(resource)
)
parse_and_create_resource_orig = parsing.parse_and_create_resource
parsing.parse_and_create_resource = parse_and_create_resource
# Patch CloudFormation parse_output(..) method to fix a bug in moto
def parse_output(output_logical_id, output_json, resources_map):
try:
return parse_output_orig(output_logical_id, output_json, resources_map)
except KeyError:
output = Output()
output.key = output_logical_id
output.value = None
output.description = output_json.get("Description")
return output
parse_output_orig = parsing.parse_output
parsing.parse_output = parse_output
# Patch DynamoDB get_cfn_attribute(..) method in moto
def DynamoDB_Table_get_cfn_attribute(self, attribute_name):
try:
return DynamoDB_Table_get_cfn_attribute_orig(self, attribute_name)
except Exception:
if attribute_name == "Arn":
return aws_stack.dynamodb_table_arn(table_name=self.name)
raise
DynamoDB_Table_get_cfn_attribute_orig = dynamodb_models.Table.get_cfn_attribute
dynamodb_models.Table.get_cfn_attribute = DynamoDB_Table_get_cfn_attribute
# Patch DynamoDB get_cfn_attribute(..) method in moto
def DynamoDB2_Table_get_cfn_attribute(self, attribute_name):
if attribute_name == "Arn":
return aws_stack.dynamodb_table_arn(table_name=self.name)
elif attribute_name == "StreamArn":
if (self.stream_specification or {}).get("StreamEnabled"):
return aws_stack.dynamodb_stream_arn(self.name, "latest")
return None
raise UnformattedGetAttTemplateException()
dynamodb2_models.Table.get_cfn_attribute = DynamoDB2_Table_get_cfn_attribute
# Patch SQS get_cfn_attribute(..) method in moto
def SQS_Queue_get_cfn_attribute(self, attribute_name):
if attribute_name == "Arn":
return aws_stack.sqs_queue_arn(queue_name=self.name)
return SQS_Queue_get_cfn_attribute_orig(self, attribute_name)
SQS_Queue_get_cfn_attribute_orig = sqs_models.Queue.get_cfn_attribute
sqs_models.Queue.get_cfn_attribute = SQS_Queue_get_cfn_attribute
# Patch SQS physical_resource_id(..) method in moto
@property
def SQS_Queue_physical_resource_id(self):
result = SQS_Queue_physical_resource_id_orig.fget(self)
if "://" not in result:
# convert ID to queue URL
return aws_stack.get_sqs_queue_url(result)
return result
SQS_Queue_physical_resource_id_orig = sqs_models.Queue.physical_resource_id
sqs_models.Queue.physical_resource_id = SQS_Queue_physical_resource_id
# Patch Lambda get_cfn_attribute(..) method in moto
def Lambda_Function_get_cfn_attribute(self, attribute_name):
try:
if attribute_name == "Arn":
return self.function_arn
return Lambda_Function_get_cfn_attribute_orig(self, attribute_name)
except Exception:
if attribute_name in ("Name", "FunctionName"):
return self.function_name
raise
Lambda_Function_get_cfn_attribute_orig = (
lambda_models.LambdaFunction.get_cfn_attribute
)
lambda_models.LambdaFunction.get_cfn_attribute = Lambda_Function_get_cfn_attribute
# Patch DynamoDB get_cfn_attribute(..) method in moto
def DynamoDB_Table_get_cfn_attribute(self, attribute_name):
try:
if attribute_name == "StreamArn":
streams = aws_stack.connect_to_service("dynamodbstreams").list_streams(
TableName=self.name
)["Streams"]
return streams[0]["StreamArn"] if streams else None
return DynamoDB_Table_get_cfn_attribute_orig(self, attribute_name)
except Exception as e:
LOG.warning(
'Unable to get attribute "%s" from resource %s: %s'
% (attribute_name, type(self), e)
)
raise
DynamoDB_Table_get_cfn_attribute_orig = dynamodb_models.Table.get_cfn_attribute
dynamodb_models.Table.get_cfn_attribute = DynamoDB_Table_get_cfn_attribute
# Patch IAM get_cfn_attribute(..) method in moto
def IAM_Role_get_cfn_attribute(self, attribute_name):
try:
return IAM_Role_get_cfn_attribute_orig(self, attribute_name)
except Exception:
if attribute_name == "Arn":
return aws_stack.role_arn(self.name)
raise
IAM_Role_get_cfn_attribute_orig = iam_models.Role.get_cfn_attribute
iam_models.Role.get_cfn_attribute = IAM_Role_get_cfn_attribute
# Patch LambdaFunction create_from_cloudformation_json(..) method in moto
@classmethod
def Lambda_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
resource_name = (
cloudformation_json.get("Properties", {}).get("FunctionName")
or resource_name
)
return Lambda_create_from_cloudformation_json_orig(
resource_name, cloudformation_json, region_name
)
Lambda_create_from_cloudformation_json_orig = (
lambda_models.LambdaFunction.create_from_cloudformation_json
)
lambda_models.LambdaFunction.create_from_cloudformation_json = (
Lambda_create_from_cloudformation_json
)
# add CloudWatch types
parsing.MODEL_MAP["AWS::ApiGateway::Deployment"] = apigw_models.Deployment
parsing.MODEL_MAP["AWS::ApiGateway::Method"] = apigw_models.Method
parsing.MODEL_MAP["AWS::ApiGateway::Resource"] = apigw_models.Resource
parsing.MODEL_MAP["AWS::ApiGateway::RestApi"] = apigw_models.RestAPI
parsing.MODEL_MAP["AWS::StepFunctions::StateMachine"] = sfn_models.StateMachine
@classmethod
def RestAPI_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
name = props["Name"]
region_name = props.get("Region") or aws_stack.get_region()
description = props.get("Description") or ""
id = props.get("Id") or short_uid()
return apigw_models.RestAPI(id, region_name, name, description)
def RestAPI_get_cfn_attribute(self, attribute_name):
if attribute_name == "Id":
return self.id
if attribute_name == "Region":
return self.region_name
if attribute_name == "Name":
return self.name
if attribute_name == "Description":
return self.description
if attribute_name == "RootResourceId":
for id, resource in self.resources.items():
if resource.parent_id is None:
return resource.id
return None
raise UnformattedGetAttTemplateException()
@classmethod
def Deployment_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
name = props["StageName"]
deployment_id = props.get("Id") or short_uid()
description = props.get("Description") or ""
return apigw_models.Deployment(deployment_id, name, description)
@classmethod
def Resource_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
region_name = props.get("Region") or aws_stack.get_region()
path_part = props.get("PathPart")
api_id = props.get("RestApiId")
parent_id = props.get("ParentId")
id = props.get("Id") or short_uid()
return apigw_models.Resource(id, region_name, api_id, path_part, parent_id)
@classmethod
def Method_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
method_type = props.get("HttpMethod")
authorization_type = props.get("AuthorizationType")
return apigw_models.Method(method_type, authorization_type)
apigw_models.RestAPI.create_from_cloudformation_json = (
RestAPI_create_from_cloudformation_json
)
apigw_models.RestAPI.get_cfn_attribute = RestAPI_get_cfn_attribute
apigw_models.Deployment.create_from_cloudformation_json = (
Deployment_create_from_cloudformation_json
)
apigw_models.Resource.create_from_cloudformation_json = (
Resource_create_from_cloudformation_json
)
apigw_models.Method.create_from_cloudformation_json = (
Method_create_from_cloudformation_json
)
# TODO: add support for AWS::ApiGateway::Model, AWS::ApiGateway::RequestValidator, ...
# fix AttributeError in moto's CloudFormation describe_stack_resource
def describe_stack_resource(self):
stack_name = self._get_param("StackName")
stack = self.cloudformation_backend.get_stack(stack_name)
logical_resource_id = self._get_param("LogicalResourceId")
for stack_resource in stack.stack_resources:
# Note: Line below has been patched
# if stack_resource.logical_resource_id == logical_resource_id:
if (
stack_resource
and stack_resource.logical_resource_id == logical_resource_id
):
resource = stack_resource
break
else:
raise ValidationError(logical_resource_id)
template = self.response_template(
responses.DESCRIBE_STACK_RESOURCE_RESPONSE_TEMPLATE
)
return template.render(stack=stack, resource=resource)
responses.CloudFormationResponse.describe_stack_resource = describe_stack_resource
|
https://github.com/localstack/localstack/issues/1642
|
{"Type": "Server", "message": "Error executing Lambda function arn:aws:lambda:ap-northeast-1:000000000000:function:service-stage-functionName: An error occurred (ResourceAlreadyExistsException) when calling the CreateLogGroup operation: The specified log group already exists Traceback (most recent call last):\n
File \"/opt/code/localstack/localstack/services/awslambda/lambda_api.py\", line 338, in run_lambda\n
event, context=context, version=version, asynchronous=asynchronous)\n
File \"/opt/code/localstack/localstack/services/awslambda/lambda_executors.py\", line 81, in execute\n
return do_execute()\n File \"/opt/code/localstack/localstack/services/awslambda/lambda_executors.py\", line 70, in do_execute\n self._store_logs(func_details, log_output, invocation_time)\n
File \"/opt/code/localstack/localstack/services/awslambda/lambda_executors.py\", line 105, in _store_logs\n logs_client.create_log_group(logGroupName=log_group_name)\n
File \"/opt/code/localstack/.venv/lib/python3.6/site-packages/botocore/client.py\", line 357, in _api_call\n
return self._make_api_call(operation_name, kwargs)\n
File \"/opt/code/localstack/.venv/lib/python3.6/site-packages/botocore/client.py\", line 661, in _make_api_call\n
raise error_class(parsed_response, operation_name)\n
botocore.errorfactory.ResourceAlreadyExistsException: An error occurred (ResourceAlreadyExistsException) when calling the CreateLogGroup operation: The specified log group already exists\n", "__type": "InternalFailure"}
|
botocore.errorfactory.ResourceAlreadyExistsException
|
def _parse_and_create_resource(
logical_id, resource_json, resources_map, region_name, update=False
):
stack_name = resources_map.get("AWS::StackName")
resource_hash_key = (stack_name, logical_id)
# If the current stack is being updated, avoid infinite recursion
updating = CURRENTLY_UPDATING_RESOURCES.get(resource_hash_key)
LOG.debug(
"Currently updating stack resource %s/%s: %s"
% (stack_name, logical_id, updating)
)
if updating:
return None
# parse and get final resource JSON
resource_tuple = parsing.parse_resource(logical_id, resource_json, resources_map)
if not resource_tuple:
return None
_, resource_json, _ = resource_tuple
# add some missing default props which otherwise cause deployments to fail
props = resource_json["Properties"] = resource_json.get("Properties") or {}
if resource_json["Type"] == "AWS::Lambda::EventSourceMapping" and not props.get(
"StartingPosition"
):
props["StartingPosition"] = "LATEST"
# check if this resource already exists in the resource map
resource = resources_map._parsed_resources.get(logical_id)
if resource and not update:
return resource
# check whether this resource needs to be deployed
resource_wrapped = {logical_id: resource_json}
should_be_created = template_deployer.should_be_deployed(
logical_id, resource_wrapped, stack_name
)
if not should_be_created:
# This resource is either not deployable or already exists. Check if it can be updated
if not template_deployer.is_updateable(
logical_id, resource_wrapped, stack_name
):
LOG.debug(
"Resource %s need not be deployed: %s" % (logical_id, resource_json)
)
if resource:
return resource
# fix resource ARNs, make sure to convert account IDs 000000000000 to 123456789012
resource_json_arns_fixed = clone(json_safe(convert_objs_to_ids(resource_json)))
set_moto_account_ids(resource_json_arns_fixed)
# create resource definition and store CloudFormation metadata in moto
if resource or update:
parse_and_update_resource_orig(
logical_id, resource_json_arns_fixed, resources_map, region_name
)
elif not resource:
resource = parse_and_create_resource_orig(
logical_id, resource_json_arns_fixed, resources_map, region_name
)
# Fix for moto which sometimes hard-codes region name as 'us-east-1'
if hasattr(resource, "region_name") and resource.region_name != region_name:
LOG.debug(
"Updating incorrect region from %s to %s"
% (resource.region_name, region_name)
)
resource.region_name = region_name
# Apply some fixes/patches to the resource names, then deploy resource in LocalStack
update_resource_name(resource, resource_json)
LOG.debug("Deploying CloudFormation resource: %s" % resource_json)
try:
CURRENTLY_UPDATING_RESOURCES[resource_hash_key] = True
deploy_func = (
template_deployer.update_resource
if update
else template_deployer.deploy_resource
)
result = deploy_func(logical_id, resource_wrapped, stack_name=stack_name)
finally:
CURRENTLY_UPDATING_RESOURCES[resource_hash_key] = False
if not should_be_created:
# skip the parts below for update requests
return resource
def find_id(resource):
"""Find ID of the given resource."""
for id_attr in ("Id", "id", "ResourceId", "RestApiId", "DeploymentId"):
if id_attr in resource:
return resource[id_attr]
# update resource IDs to avoid mismatch between CF moto and LocalStack backend resources
if hasattr(resource, "id") or (isinstance(resource, dict) and resource.get("id")):
existing_id = resource.id if hasattr(resource, "id") else resource["id"]
new_res_id = find_id(result)
LOG.debug(
"Updating resource id: %s - %s, %s - %s"
% (existing_id, new_res_id, resource, resource_json)
)
if new_res_id:
LOG.info(
"Updating resource ID from %s to %s (%s)"
% (existing_id, new_res_id, region_name)
)
update_resource_id(resource, new_res_id, props, region_name)
else:
LOG.warning(
"Unable to extract id for resource %s: %s" % (logical_id, result)
)
# update physical_resource_id field
update_physical_resource_id(resource)
return resource
|
def _parse_and_create_resource(logical_id, resource_json, resources_map, region_name):
stack_name = resources_map.get("AWS::StackName")
resource_hash_key = (stack_name, logical_id)
# If the current stack is being updated, avoid infinite recursion
updating = CURRENTLY_UPDATING_RESOURCES.get(resource_hash_key)
LOG.debug(
"Currently updating stack resource %s/%s: %s"
% (stack_name, logical_id, updating)
)
if updating:
return None
# parse and get final resource JSON
resource_tuple = parsing.parse_resource(logical_id, resource_json, resources_map)
if not resource_tuple:
return None
_, resource_json, _ = resource_tuple
# add some missing default props which otherwise cause deployments to fail
props = resource_json["Properties"] = resource_json.get("Properties") or {}
if resource_json["Type"] == "AWS::Lambda::EventSourceMapping" and not props.get(
"StartingPosition"
):
props["StartingPosition"] = "LATEST"
# check if this resource already exists in the resource map
resource = resources_map._parsed_resources.get(logical_id)
# check whether this resource needs to be deployed
resource_wrapped = {logical_id: resource_json}
should_be_created = template_deployer.should_be_deployed(
logical_id, resource_wrapped, stack_name
)
if not should_be_created:
# This resource is either not deployable or already exists. Check if it can be updated
if not template_deployer.is_updateable(
logical_id, resource_wrapped, stack_name
):
LOG.debug(
"Resource %s need not be deployed: %s" % (logical_id, resource_json)
)
if resource:
return resource
if not resource:
# fix resource ARNs, make sure to convert account IDs 000000000000 to 123456789012
resource_json_arns_fixed = clone(json_safe(convert_objs_to_ids(resource_json)))
set_moto_account_ids(resource_json_arns_fixed)
# create resource definition and store CloudFormation metadata in moto
resource = parse_and_create_resource_orig(
logical_id, resource_json_arns_fixed, resources_map, region_name
)
# Fix for moto which sometimes hard-codes region name as 'us-east-1'
if hasattr(resource, "region_name") and resource.region_name != region_name:
LOG.debug(
"Updating incorrect region from %s to %s"
% (resource.region_name, region_name)
)
resource.region_name = region_name
# Apply some fixes/patches to the resource names, then deploy resource in LocalStack
update_resource_name(resource, resource_json)
LOG.debug("Deploying CloudFormation resource: %s" % resource_json)
try:
CURRENTLY_UPDATING_RESOURCES[resource_hash_key] = True
deploy_func = (
template_deployer.deploy_resource
if should_be_created
else template_deployer.update_resource
)
result = deploy_func(logical_id, resource_wrapped, stack_name=stack_name)
finally:
CURRENTLY_UPDATING_RESOURCES[resource_hash_key] = False
if not should_be_created:
# skip the parts below for update requests
return resource
def find_id(resource):
"""Find ID of the given resource."""
for id_attr in ("Id", "id", "ResourceId", "RestApiId", "DeploymentId"):
if id_attr in resource:
return resource[id_attr]
# update resource IDs to avoid mismatch between CF moto and LocalStack backend resources
if hasattr(resource, "id") or (isinstance(resource, dict) and resource.get("id")):
existing_id = resource.id if hasattr(resource, "id") else resource["id"]
new_res_id = find_id(result)
LOG.debug(
"Updating resource id: %s - %s, %s - %s"
% (existing_id, new_res_id, resource, resource_json)
)
if new_res_id:
LOG.info(
"Updating resource ID from %s to %s (%s)"
% (existing_id, new_res_id, region_name)
)
update_resource_id(resource, new_res_id, props, region_name)
else:
LOG.warning(
"Unable to extract id for resource %s: %s" % (logical_id, result)
)
# update physical_resource_id field
update_physical_resource_id(resource)
return resource
|
https://github.com/localstack/localstack/issues/1642
|
{"Type": "Server", "message": "Error executing Lambda function arn:aws:lambda:ap-northeast-1:000000000000:function:service-stage-functionName: An error occurred (ResourceAlreadyExistsException) when calling the CreateLogGroup operation: The specified log group already exists Traceback (most recent call last):\n
File \"/opt/code/localstack/localstack/services/awslambda/lambda_api.py\", line 338, in run_lambda\n
event, context=context, version=version, asynchronous=asynchronous)\n
File \"/opt/code/localstack/localstack/services/awslambda/lambda_executors.py\", line 81, in execute\n
return do_execute()\n File \"/opt/code/localstack/localstack/services/awslambda/lambda_executors.py\", line 70, in do_execute\n self._store_logs(func_details, log_output, invocation_time)\n
File \"/opt/code/localstack/localstack/services/awslambda/lambda_executors.py\", line 105, in _store_logs\n logs_client.create_log_group(logGroupName=log_group_name)\n
File \"/opt/code/localstack/.venv/lib/python3.6/site-packages/botocore/client.py\", line 357, in _api_call\n
return self._make_api_call(operation_name, kwargs)\n
File \"/opt/code/localstack/.venv/lib/python3.6/site-packages/botocore/client.py\", line 661, in _make_api_call\n
raise error_class(parsed_response, operation_name)\n
botocore.errorfactory.ResourceAlreadyExistsException: An error occurred (ResourceAlreadyExistsException) when calling the CreateLogGroup operation: The specified log group already exists\n", "__type": "InternalFailure"}
|
botocore.errorfactory.ResourceAlreadyExistsException
|
def _send_cors_headers(self, response=None):
headers = response and response.headers or {}
if "Access-Control-Allow-Origin" not in headers:
self.send_header("Access-Control-Allow-Origin", "*")
if "Access-Control-Allow-Methods" not in headers:
self.send_header("Access-Control-Allow-Methods", ",".join(CORS_ALLOWED_METHODS))
if "Access-Control-Allow-Headers" not in headers:
requested_headers = self.headers.get("Access-Control-Request-Headers", "")
requested_headers = (
re.split(r"[,\s]+", requested_headers) + CORS_ALLOWED_HEADERS
)
self.send_header(
"Access-Control-Allow-Headers",
",".join([h for h in requested_headers if h]),
)
if "Access-Control-Expose-Headers" not in headers:
self.send_header("Access-Control-Expose-Headers", ",".join(CORS_EXPOSE_HEADERS))
|
def _send_cors_headers(self, response=None):
headers = response and response.headers or {}
if "Access-Control-Allow-Origin" not in headers:
self.send_header("Access-Control-Allow-Origin", "*")
if "Access-Control-Allow-Methods" not in headers:
self.send_header("Access-Control-Allow-Methods", ",".join(CORS_ALLOWED_METHODS))
if "Access-Control-Allow-Headers" not in headers:
requested_headers = self.headers.get("Access-Control-Request-Headers", "")
requested_headers = (
re.split(r"[,\s]+", requested_headers) + CORS_ALLOWED_HEADERS
)
self.send_header("Access-Control-Allow-Headers", ",".join(requested_headers))
if "Access-Control-Expose-Headers" not in headers:
self.send_header("Access-Control-Expose-Headers", ",".join(CORS_EXPOSE_HEADERS))
|
https://github.com/localstack/localstack/issues/1551
|
localstack_1 | 2019-09-09T10:46:22:ERROR:localstack.services.generic_proxy: Error forwarding request: 'QueueUrl' Traceback (most recent call last):
localstack_1 | File "/opt/code/localstack/localstack/services/generic_proxy.py", line 234, in forward
localstack_1 | path=path, data=data, headers=forward_headers)
localstack_1 | File "/opt/code/localstack/localstack/services/sqs/sqs_listener.py", line 53, in forward_request
localstack_1 | self._set_queue_attributes(req_data)
localstack_1 | File "/opt/code/localstack/localstack/services/sqs/sqs_listener.py", line 245, in _set_queue_attributes
localstack_1 | queue_url = req_data['QueueUrl'][0]
localstack_1 | KeyError: 'QueueUrl'
|
KeyError
|
def start_infra(asynchronous=False, apis=None):
try:
is_in_docker = in_docker()
# print a warning if we're not running in Docker but using Docker based LAMBDA_EXECUTOR
if not is_in_docker and "docker" in config.LAMBDA_EXECUTOR and not is_linux():
print(
(
"!WARNING! - Running outside of Docker with LAMBDA_EXECUTOR=%s can lead to "
"problems on your OS. The environment variable $LOCALSTACK_HOSTNAME may not "
"be properly set in your Lambdas."
)
% config.LAMBDA_EXECUTOR
)
# load plugins
load_plugins()
event_publisher.fire_event(
event_publisher.EVENT_START_INFRA,
{"d": is_in_docker and 1 or 0, "c": in_ci() and 1 or 0},
)
# set up logging
setup_logging()
# prepare APIs
apis = canonicalize_api_names(apis)
# set environment
os.environ["AWS_REGION"] = config.DEFAULT_REGION
os.environ["ENV"] = ENV_DEV
# register signal handlers
if not os.environ.get(ENV_INTERNAL_TEST_RUN):
register_signal_handlers()
# make sure AWS credentials are configured, otherwise boto3 bails on us
check_aws_credentials()
# install libs if not present
install.install_components(apis)
# Some services take a bit to come up
sleep_time = 5
# start services
thread = None
if "elasticsearch" in apis or "es" in apis:
sleep_time = max(sleep_time, 10)
# loop through plugins and start each service
for name, plugin in SERVICE_PLUGINS.items():
if name in apis:
t1 = plugin.start(asynchronous=True)
thread = thread or t1
time.sleep(sleep_time)
# ensure that all infra components are up and running
check_infra(apis=apis)
# restore persisted data
restore_persisted_data(apis=apis)
print("Ready.")
sys.stdout.flush()
if not asynchronous and thread:
# this is a bit of an ugly hack, but we need to make sure that we
# stay in the execution context of the main thread, otherwise our
# signal handlers don't work
while True:
time.sleep(1)
return thread
except KeyboardInterrupt:
print("Shutdown")
except Exception as e:
print("Error starting infrastructure: %s %s" % (e, traceback.format_exc()))
sys.stdout.flush()
raise e
finally:
if not asynchronous:
stop_infra()
|
def start_infra(asynchronous=False, apis=None):
try:
# load plugins
load_plugins()
event_publisher.fire_event(
event_publisher.EVENT_START_INFRA,
{"d": in_docker() and 1 or 0, "c": in_ci() and 1 or 0},
)
# set up logging
setup_logging()
# prepare APIs
apis = canonicalize_api_names(apis)
# set environment
os.environ["AWS_REGION"] = config.DEFAULT_REGION
os.environ["ENV"] = ENV_DEV
# register signal handlers
if not os.environ.get(ENV_INTERNAL_TEST_RUN):
register_signal_handlers()
# make sure AWS credentials are configured, otherwise boto3 bails on us
check_aws_credentials()
# install libs if not present
install.install_components(apis)
# Some services take a bit to come up
sleep_time = 5
# start services
thread = None
if "elasticsearch" in apis or "es" in apis:
sleep_time = max(sleep_time, 10)
# loop through plugins and start each service
for name, plugin in SERVICE_PLUGINS.items():
if name in apis:
t1 = plugin.start(asynchronous=True)
thread = thread or t1
time.sleep(sleep_time)
# ensure that all infra components are up and running
check_infra(apis=apis)
# restore persisted data
restore_persisted_data(apis=apis)
print("Ready.")
sys.stdout.flush()
if not asynchronous and thread:
# this is a bit of an ugly hack, but we need to make sure that we
# stay in the execution context of the main thread, otherwise our
# signal handlers don't work
while True:
time.sleep(1)
return thread
except KeyboardInterrupt:
print("Shutdown")
except Exception as e:
print("Error starting infrastructure: %s %s" % (e, traceback.format_exc()))
sys.stdout.flush()
raise e
finally:
if not asynchronous:
stop_infra()
|
https://github.com/localstack/localstack/issues/1551
|
localstack_1 | 2019-09-09T10:46:22:ERROR:localstack.services.generic_proxy: Error forwarding request: 'QueueUrl' Traceback (most recent call last):
localstack_1 | File "/opt/code/localstack/localstack/services/generic_proxy.py", line 234, in forward
localstack_1 | path=path, data=data, headers=forward_headers)
localstack_1 | File "/opt/code/localstack/localstack/services/sqs/sqs_listener.py", line 53, in forward_request
localstack_1 | self._set_queue_attributes(req_data)
localstack_1 | File "/opt/code/localstack/localstack/services/sqs/sqs_listener.py", line 245, in _set_queue_attributes
localstack_1 | queue_url = req_data['QueueUrl'][0]
localstack_1 | KeyError: 'QueueUrl'
|
KeyError
|
def forward_request(self, method, path, data, headers):
if method == "OPTIONS":
return 200
req_data = self.parse_request_data(method, path, data)
if req_data:
action = req_data.get("Action", [None])[0]
if action == "SendMessage":
new_response = self._send_message(path, data, req_data, headers)
if new_response:
return new_response
elif action == "SetQueueAttributes":
self._set_queue_attributes(path, req_data, headers)
if "QueueName" in req_data:
encoded_data = urlencode(req_data, doseq=True) if method == "POST" else ""
modified_url = None
if method == "GET":
base_path = path.partition("?")[0]
modified_url = "%s?%s" % (base_path, urlencode(req_data, doseq=True))
request = Request(
data=encoded_data, url=modified_url, headers=headers, method=method
)
return request
return True
|
def forward_request(self, method, path, data, headers):
if method == "OPTIONS":
return 200
req_data = self.parse_request_data(method, path, data)
if req_data:
action = req_data.get("Action", [None])[0]
if action == "SendMessage":
new_response = self._send_message(path, data, req_data, headers)
if new_response:
return new_response
elif action == "SetQueueAttributes":
self._set_queue_attributes(req_data)
if "QueueName" in req_data:
encoded_data = urlencode(req_data, doseq=True) if method == "POST" else ""
modified_url = None
if method == "GET":
base_path = path.partition("?")[0]
modified_url = "%s?%s" % (base_path, urlencode(req_data, doseq=True))
request = Request(
data=encoded_data, url=modified_url, headers=headers, method=method
)
return request
return True
|
https://github.com/localstack/localstack/issues/1551
|
localstack_1 | 2019-09-09T10:46:22:ERROR:localstack.services.generic_proxy: Error forwarding request: 'QueueUrl' Traceback (most recent call last):
localstack_1 | File "/opt/code/localstack/localstack/services/generic_proxy.py", line 234, in forward
localstack_1 | path=path, data=data, headers=forward_headers)
localstack_1 | File "/opt/code/localstack/localstack/services/sqs/sqs_listener.py", line 53, in forward_request
localstack_1 | self._set_queue_attributes(req_data)
localstack_1 | File "/opt/code/localstack/localstack/services/sqs/sqs_listener.py", line 245, in _set_queue_attributes
localstack_1 | queue_url = req_data['QueueUrl'][0]
localstack_1 | KeyError: 'QueueUrl'
|
KeyError
|
def return_response(self, method, path, data, headers, response, request_handler):
if method == "OPTIONS" and path == "/":
# Allow CORS preflight requests to succeed.
return 200
if method != "POST":
return
region_name = extract_region_from_auth_header(headers)
req_data = urlparse.parse_qs(to_str(data))
action = req_data.get("Action", [None])[0]
content_str = content_str_original = to_str(response.content)
self._fire_event(req_data, response)
# patch the response and add missing attributes
if action == "GetQueueAttributes":
content_str = self._add_queue_attributes(path, req_data, content_str, headers)
# patch the response and return the correct endpoint URLs / ARNs
if action in ("CreateQueue", "GetQueueUrl", "ListQueues", "GetQueueAttributes"):
if config.USE_SSL and "<QueueUrl>http://" in content_str:
# return https://... if we're supposed to use SSL
content_str = re.sub(
r"<QueueUrl>\s*http://", r"<QueueUrl>https://", content_str
)
# expose external hostname:port
external_port = SQS_PORT_EXTERNAL or get_external_port(headers, request_handler)
content_str = re.sub(
r"<QueueUrl>\s*([a-z]+)://[^<]*:([0-9]+)/([^<]*)\s*</QueueUrl>",
r"<QueueUrl>\1://%s:%s/\3</QueueUrl>" % (HOSTNAME_EXTERNAL, external_port),
content_str,
)
# fix queue ARN
content_str = re.sub(
r"<([a-zA-Z0-9]+)>\s*arn:aws:sqs:elasticmq:([^<]+)</([a-zA-Z0-9]+)>",
r"<\1>arn:aws:sqs:%s:\2</\3>" % (region_name),
content_str,
)
if content_str_original != content_str:
# if changes have been made, return patched response
new_response = Response()
new_response.status_code = response.status_code
new_response.headers = response.headers
new_response._content = content_str
new_response.headers["content-length"] = len(new_response._content)
return new_response
# Since the following 2 API calls are not implemented in ElasticMQ, we're mocking them
# and letting them to return an empty response
if action == "TagQueue":
new_response = Response()
new_response.status_code = 200
new_response._content = (
(
"""
<?xml version="1.0"?>
<TagQueueResponse>
<ResponseMetadata>
<RequestId>{}</RequestId>
</ResponseMetadata>
</TagQueueResponse>
"""
)
.strip()
.format(uuid.uuid4())
)
return new_response
elif action == "ListQueueTags":
new_response = Response()
new_response.status_code = 200
new_response._content = (
(
"""
<?xml version="1.0"?>
<ListQueueTagsResponse xmlns="{}">
<ListQueueTagsResult/>
<ResponseMetadata>
<RequestId>{}</RequestId>
</ResponseMetadata>
</ListQueueTagsResponse>
"""
)
.strip()
.format(XMLNS_SQS, uuid.uuid4())
)
return new_response
|
def return_response(self, method, path, data, headers, response, request_handler):
if method == "OPTIONS" and path == "/":
# Allow CORS preflight requests to succeed.
return 200
if method == "POST" and path == "/":
region_name = extract_region_from_auth_header(headers)
req_data = urlparse.parse_qs(to_str(data))
action = req_data.get("Action", [None])[0]
content_str = content_str_original = to_str(response.content)
self._fire_event(req_data, response)
# patch the response and add missing attributes
if action == "GetQueueAttributes":
content_str = self._add_queue_attributes(req_data, content_str)
# patch the response and return the correct endpoint URLs / ARNs
if action in ("CreateQueue", "GetQueueUrl", "ListQueues", "GetQueueAttributes"):
if config.USE_SSL and "<QueueUrl>http://" in content_str:
# return https://... if we're supposed to use SSL
content_str = re.sub(
r"<QueueUrl>\s*http://", r"<QueueUrl>https://", content_str
)
# expose external hostname:port
external_port = SQS_PORT_EXTERNAL or get_external_port(
headers, request_handler
)
content_str = re.sub(
r"<QueueUrl>\s*([a-z]+)://[^<]*:([0-9]+)/([^<]*)\s*</QueueUrl>",
r"<QueueUrl>\1://%s:%s/\3</QueueUrl>"
% (HOSTNAME_EXTERNAL, external_port),
content_str,
)
# fix queue ARN
content_str = re.sub(
r"<([a-zA-Z0-9]+)>\s*arn:aws:sqs:elasticmq:([^<]+)</([a-zA-Z0-9]+)>",
r"<\1>arn:aws:sqs:%s:\2</\3>" % (region_name),
content_str,
)
if content_str_original != content_str:
# if changes have been made, return patched response
new_response = Response()
new_response.status_code = response.status_code
new_response.headers = response.headers
new_response._content = content_str
new_response.headers["content-length"] = len(new_response._content)
return new_response
# Since the following 2 API calls are not implemented in ElasticMQ, we're mocking them
# and letting them to return an empty response
if action == "TagQueue":
new_response = Response()
new_response.status_code = 200
new_response._content = (
(
"""
<?xml version="1.0"?>
<TagQueueResponse>
<ResponseMetadata>
<RequestId>{}</RequestId>
</ResponseMetadata>
</TagQueueResponse>
"""
)
.strip()
.format(uuid.uuid4())
)
return new_response
elif action == "ListQueueTags":
new_response = Response()
new_response.status_code = 200
new_response._content = (
(
"""
<?xml version="1.0"?>
<ListQueueTagsResponse xmlns="{}">
<ListQueueTagsResult/>
<ResponseMetadata>
<RequestId>{}</RequestId>
</ResponseMetadata>
</ListQueueTagsResponse>
"""
)
.strip()
.format(XMLNS_SQS, uuid.uuid4())
)
return new_response
|
https://github.com/localstack/localstack/issues/1551
|
localstack_1 | 2019-09-09T10:46:22:ERROR:localstack.services.generic_proxy: Error forwarding request: 'QueueUrl' Traceback (most recent call last):
localstack_1 | File "/opt/code/localstack/localstack/services/generic_proxy.py", line 234, in forward
localstack_1 | path=path, data=data, headers=forward_headers)
localstack_1 | File "/opt/code/localstack/localstack/services/sqs/sqs_listener.py", line 53, in forward_request
localstack_1 | self._set_queue_attributes(req_data)
localstack_1 | File "/opt/code/localstack/localstack/services/sqs/sqs_listener.py", line 245, in _set_queue_attributes
localstack_1 | queue_url = req_data['QueueUrl'][0]
localstack_1 | KeyError: 'QueueUrl'
|
KeyError
|
def _send_message(self, path, data, req_data, headers):
queue_url = self._queue_url(path, req_data, headers)
queue_name = queue_url[queue_url.rindex("/") + 1 :]
message_body = req_data.get("MessageBody", [None])[0]
message_attributes = self.format_message_attributes(req_data)
region_name = extract_region_from_auth_header(headers)
process_result = lambda_api.process_sqs_message(
message_body, message_attributes, queue_name, region_name=region_name
)
if process_result:
# If a Lambda was listening, do not add the message to the queue
new_response = Response()
new_response._content = SUCCESSFUL_SEND_MESSAGE_XML_TEMPLATE.format(
message_attr_hash=md5(data),
message_body_hash=md5(message_body),
message_id=str(uuid.uuid4()),
)
new_response.status_code = 200
return new_response
|
def _send_message(self, path, data, req_data, headers):
queue_url = req_data.get("QueueUrl", [path.partition("?")[0]])[0]
queue_name = queue_url[queue_url.rindex("/") + 1 :]
message_body = req_data.get("MessageBody", [None])[0]
message_attributes = self.format_message_attributes(req_data)
region_name = extract_region_from_auth_header(headers)
process_result = lambda_api.process_sqs_message(
message_body, message_attributes, queue_name, region_name=region_name
)
if process_result:
# If a Lambda was listening, do not add the message to the queue
new_response = Response()
new_response._content = SUCCESSFUL_SEND_MESSAGE_XML_TEMPLATE.format(
message_attr_hash=md5(data),
message_body_hash=md5(message_body),
message_id=str(uuid.uuid4()),
)
new_response.status_code = 200
return new_response
|
https://github.com/localstack/localstack/issues/1551
|
localstack_1 | 2019-09-09T10:46:22:ERROR:localstack.services.generic_proxy: Error forwarding request: 'QueueUrl' Traceback (most recent call last):
localstack_1 | File "/opt/code/localstack/localstack/services/generic_proxy.py", line 234, in forward
localstack_1 | path=path, data=data, headers=forward_headers)
localstack_1 | File "/opt/code/localstack/localstack/services/sqs/sqs_listener.py", line 53, in forward_request
localstack_1 | self._set_queue_attributes(req_data)
localstack_1 | File "/opt/code/localstack/localstack/services/sqs/sqs_listener.py", line 245, in _set_queue_attributes
localstack_1 | queue_url = req_data['QueueUrl'][0]
localstack_1 | KeyError: 'QueueUrl'
|
KeyError
|
def _set_queue_attributes(self, path, req_data, headers):
queue_url = self._queue_url(path, req_data, headers)
attrs = self._format_attributes(req_data)
# select only the attributes in UNSUPPORTED_ATTRIBUTE_NAMES
attrs = dict([(k, v) for k, v in attrs.items() if k in UNSUPPORTED_ATTRIBUTE_NAMES])
QUEUE_ATTRIBUTES[queue_url] = QUEUE_ATTRIBUTES.get(queue_url) or {}
QUEUE_ATTRIBUTES[queue_url].update(attrs)
|
def _set_queue_attributes(self, req_data):
queue_url = req_data["QueueUrl"][0]
attrs = self._format_attributes(req_data)
# select only the attributes in UNSUPPORTED_ATTRIBUTE_NAMES
attrs = dict([(k, v) for k, v in attrs.items() if k in UNSUPPORTED_ATTRIBUTE_NAMES])
QUEUE_ATTRIBUTES[queue_url] = QUEUE_ATTRIBUTES.get(queue_url) or {}
QUEUE_ATTRIBUTES[queue_url].update(attrs)
|
https://github.com/localstack/localstack/issues/1551
|
localstack_1 | 2019-09-09T10:46:22:ERROR:localstack.services.generic_proxy: Error forwarding request: 'QueueUrl' Traceback (most recent call last):
localstack_1 | File "/opt/code/localstack/localstack/services/generic_proxy.py", line 234, in forward
localstack_1 | path=path, data=data, headers=forward_headers)
localstack_1 | File "/opt/code/localstack/localstack/services/sqs/sqs_listener.py", line 53, in forward_request
localstack_1 | self._set_queue_attributes(req_data)
localstack_1 | File "/opt/code/localstack/localstack/services/sqs/sqs_listener.py", line 245, in _set_queue_attributes
localstack_1 | queue_url = req_data['QueueUrl'][0]
localstack_1 | KeyError: 'QueueUrl'
|
KeyError
|
def _add_queue_attributes(self, path, req_data, content_str, headers):
flags = re.MULTILINE | re.DOTALL
queue_url = self._queue_url(path, req_data, headers)
regex = r"(.*<GetQueueAttributesResult>)(.*)(</GetQueueAttributesResult>.*)"
attrs = re.sub(regex, r"\2", content_str, flags=flags)
for key, value in QUEUE_ATTRIBUTES.get(queue_url, {}).items():
if not re.match(r"<Name>\s*%s\s*</Name>" % key, attrs, flags=flags):
attrs += "<Attribute><Name>%s</Name><Value>%s</Value></Attribute>" % (
key,
value,
)
content_str = (
re.sub(regex, r"\1", content_str, flags=flags)
+ attrs
+ re.sub(regex, r"\3", content_str, flags=flags)
)
return content_str
|
def _add_queue_attributes(self, req_data, content_str):
flags = re.MULTILINE | re.DOTALL
queue_url = req_data["QueueUrl"][0]
regex = r"(.*<GetQueueAttributesResult>)(.*)(</GetQueueAttributesResult>.*)"
attrs = re.sub(regex, r"\2", content_str, flags=flags)
for key, value in QUEUE_ATTRIBUTES.get(queue_url, {}).items():
if not re.match(r"<Name>\s*%s\s*</Name>" % key, attrs, flags=flags):
attrs += "<Attribute><Name>%s</Name><Value>%s</Value></Attribute>" % (
key,
value,
)
content_str = (
re.sub(regex, r"\1", content_str, flags=flags)
+ attrs
+ re.sub(regex, r"\3", content_str, flags=flags)
)
return content_str
|
https://github.com/localstack/localstack/issues/1551
|
localstack_1 | 2019-09-09T10:46:22:ERROR:localstack.services.generic_proxy: Error forwarding request: 'QueueUrl' Traceback (most recent call last):
localstack_1 | File "/opt/code/localstack/localstack/services/generic_proxy.py", line 234, in forward
localstack_1 | path=path, data=data, headers=forward_headers)
localstack_1 | File "/opt/code/localstack/localstack/services/sqs/sqs_listener.py", line 53, in forward_request
localstack_1 | self._set_queue_attributes(req_data)
localstack_1 | File "/opt/code/localstack/localstack/services/sqs/sqs_listener.py", line 245, in _set_queue_attributes
localstack_1 | queue_url = req_data['QueueUrl'][0]
localstack_1 | KeyError: 'QueueUrl'
|
KeyError
|
def generate_ssl_cert(
target_file=None,
overwrite=False,
random=False,
return_content=False,
serial_number=None,
):
# Note: Do NOT import "OpenSSL" at the root scope
# (Our test Lambdas are importing this file but don't have the module installed)
from OpenSSL import crypto
if target_file and not overwrite and os.path.exists(target_file):
key_file_name = "%s.key" % target_file
cert_file_name = "%s.crt" % target_file
return target_file, cert_file_name, key_file_name
if random and target_file:
if "." in target_file:
target_file = target_file.replace(".", ".%s." % short_uid(), 1)
else:
target_file = "%s.%s" % (target_file, short_uid())
# create a key pair
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, 1024)
# create a self-signed cert
cert = crypto.X509()
subj = cert.get_subject()
subj.C = "AU"
subj.ST = "Some-State"
subj.L = "Some-Locality"
subj.O = "LocalStack Org" # noqa
subj.OU = "Testing"
subj.CN = "LocalStack"
serial_number = serial_number or 1001
cert.set_serial_number(serial_number)
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)
cert.set_issuer(cert.get_subject())
cert.set_pubkey(k)
cert.sign(k, "sha1")
cert_file = StringIO()
key_file = StringIO()
cert_file.write(to_str(crypto.dump_certificate(crypto.FILETYPE_PEM, cert)))
key_file.write(to_str(crypto.dump_privatekey(crypto.FILETYPE_PEM, k)))
cert_file_content = cert_file.getvalue().strip()
key_file_content = key_file.getvalue().strip()
file_content = "%s\n%s" % (key_file_content, cert_file_content)
if target_file:
# check existence to avoid permission denied issues:
# https://github.com/localstack/localstack/issues/1607
if not os.path.exists(target_file):
save_file(target_file, file_content)
key_file_name = "%s.key" % target_file
cert_file_name = "%s.crt" % target_file
save_file(key_file_name, key_file_content)
save_file(cert_file_name, cert_file_content)
TMP_FILES.append(target_file)
TMP_FILES.append(key_file_name)
TMP_FILES.append(cert_file_name)
if not return_content:
return target_file, cert_file_name, key_file_name
return file_content
|
def generate_ssl_cert(
target_file=None,
overwrite=False,
random=False,
return_content=False,
serial_number=None,
):
# Note: Do NOT import "OpenSSL" at the root scope
# (Our test Lambdas are importing this file but don't have the module installed)
from OpenSSL import crypto
if target_file and not overwrite and os.path.exists(target_file):
key_file_name = "%s.key" % target_file
cert_file_name = "%s.crt" % target_file
return target_file, cert_file_name, key_file_name
if random and target_file:
if "." in target_file:
target_file = target_file.replace(".", ".%s." % short_uid(), 1)
else:
target_file = "%s.%s" % (target_file, short_uid())
# create a key pair
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, 1024)
# create a self-signed cert
cert = crypto.X509()
subj = cert.get_subject()
subj.C = "AU"
subj.ST = "Some-State"
subj.L = "Some-Locality"
subj.O = "LocalStack Org" # noqa
subj.OU = "Testing"
subj.CN = "LocalStack"
serial_number = serial_number or 1001
cert.set_serial_number(serial_number)
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)
cert.set_issuer(cert.get_subject())
cert.set_pubkey(k)
cert.sign(k, "sha1")
cert_file = StringIO()
key_file = StringIO()
cert_file.write(to_str(crypto.dump_certificate(crypto.FILETYPE_PEM, cert)))
key_file.write(to_str(crypto.dump_privatekey(crypto.FILETYPE_PEM, k)))
cert_file_content = cert_file.getvalue().strip()
key_file_content = key_file.getvalue().strip()
file_content = "%s\n%s" % (key_file_content, cert_file_content)
if target_file:
save_file(target_file, file_content)
key_file_name = "%s.key" % target_file
cert_file_name = "%s.crt" % target_file
save_file(key_file_name, key_file_content)
save_file(cert_file_name, cert_file_content)
TMP_FILES.append(target_file)
TMP_FILES.append(key_file_name)
TMP_FILES.append(cert_file_name)
if not return_content:
return target_file, cert_file_name, key_file_name
return file_content
|
https://github.com/localstack/localstack/issues/1551
|
localstack_1 | 2019-09-09T10:46:22:ERROR:localstack.services.generic_proxy: Error forwarding request: 'QueueUrl' Traceback (most recent call last):
localstack_1 | File "/opt/code/localstack/localstack/services/generic_proxy.py", line 234, in forward
localstack_1 | path=path, data=data, headers=forward_headers)
localstack_1 | File "/opt/code/localstack/localstack/services/sqs/sqs_listener.py", line 53, in forward_request
localstack_1 | self._set_queue_attributes(req_data)
localstack_1 | File "/opt/code/localstack/localstack/services/sqs/sqs_listener.py", line 245, in _set_queue_attributes
localstack_1 | queue_url = req_data['QueueUrl'][0]
localstack_1 | KeyError: 'QueueUrl'
|
KeyError
|
def forward_request(self, method, path, data, headers):
data = data and json.loads(to_str(data))
if re.match(PATH_REGEX_USER_REQUEST, path):
search_match = re.search(PATH_REGEX_USER_REQUEST, path)
api_id = search_match.group(1)
stage = search_match.group(2)
relative_path_w_query_params = "/%s" % search_match.group(3)
relative_path, query_string_params = extract_query_string_params(
path=relative_path_w_query_params
)
path_map = helpers.get_rest_api_paths(rest_api_id=api_id)
try:
extracted_path, resource = get_resource_for_path(
path=relative_path, path_map=path_map
)
except Exception:
return make_error("Unable to find path %s" % path, 404)
integrations = resource.get("resourceMethods", {})
integration = integrations.get(method, {})
if not integration:
integration = integrations.get("ANY", {})
integration = integration.get("methodIntegration")
if not integration:
if method == "OPTIONS" and "Origin" in headers:
# default to returning CORS headers if this is an OPTIONS request
return get_cors_response(headers)
return make_error("Unable to find integration for path %s" % path, 404)
uri = integration.get("uri")
if method == "POST" and integration["type"] == "AWS":
if uri.endswith("kinesis:action/PutRecords"):
template = integration["requestTemplates"][APPLICATION_JSON]
new_request = aws_stack.render_velocity_template(template, data)
# forward records to target kinesis stream
headers = aws_stack.mock_aws_request_headers(service="kinesis")
headers["X-Amz-Target"] = kinesis_listener.ACTION_PUT_RECORDS
result = common.make_http_request(
url=TEST_KINESIS_URL,
method="POST",
data=new_request,
headers=headers,
)
return result
elif uri.startswith("arn:aws:apigateway:") and ":sqs:path" in uri:
template = integration["requestTemplates"][APPLICATION_JSON]
account_id, queue = uri.split("/")[-2:]
region_name = uri.split(":")[3]
new_request = (
aws_stack.render_velocity_template(template, data)
+ "&QueueName=%s" % queue
)
headers = aws_stack.mock_aws_request_headers(
service="sqs", region_name=region_name
)
url = urljoin(
TEST_SQS_URL, "%s/%s?%s" % (account_id, queue, new_request)
)
result = common.make_http_request(url, method="GET", headers=headers)
return result
else:
msg = 'API Gateway action uri "%s" not yet implemented' % uri
LOGGER.warning(msg)
return make_error(msg, 404)
elif integration["type"] == "AWS_PROXY":
if uri.startswith("arn:aws:apigateway:") and ":lambda:path" in uri:
func_arn = (
uri.split(":lambda:path")[1]
.split("functions/")[1]
.split("/invocations")[0]
)
data_str = json.dumps(data) if isinstance(data, (dict, list)) else data
account_id = (
uri.split(":lambda:path")[1].split(":function:")[0].split(":")[-1]
)
source_ip = headers["X-Forwarded-For"].split(",")[-2]
# Sample request context:
# https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-create-api-as-simple-proxy-for-lambda.html#api-gateway-create-api-as-simple-proxy-for-lambda-test
request_context = {
"path": relative_path,
"accountId": account_id,
"resourceId": resource.get("id"),
"stage": stage,
"identity": {
"accountId": account_id,
"sourceIp": source_ip,
"userAgent": headers["User-Agent"],
},
}
try:
path_params = extract_path_params(
path=relative_path, extracted_path=extracted_path
)
except Exception:
path_params = {}
result = lambda_api.process_apigateway_invocation(
func_arn,
relative_path,
data_str,
headers,
path_params=path_params,
query_string_params=query_string_params,
method=method,
resource_path=path,
request_context=request_context,
)
if isinstance(result, FlaskResponse):
return flask_to_requests_response(result)
if isinstance(result, Response):
return result
response = Response()
parsed_result = (
result if isinstance(result, dict) else json.loads(result)
)
parsed_result = common.json_safe(parsed_result)
response.status_code = int(parsed_result.get("statusCode", 200))
response.headers.update(parsed_result.get("headers", {}))
try:
if isinstance(parsed_result["body"], dict):
response._content = json.dumps(parsed_result["body"])
else:
response._content = parsed_result["body"]
except Exception:
response._content = "{}"
return response
else:
msg = 'API Gateway action uri "%s" not yet implemented' % uri
LOGGER.warning(msg)
return make_error(msg, 404)
elif integration["type"] == "HTTP":
function = getattr(requests, method.lower())
if isinstance(data, dict):
data = json.dumps(data)
result = function(integration["uri"], data=data, headers=headers)
return result
else:
msg = (
'API Gateway integration type "%s" for method "%s" not yet implemented'
% (integration["type"], method)
)
LOGGER.warning(msg)
return make_error(msg, 404)
return 200
if re.match(PATH_REGEX_AUTHORIZERS, path):
return handle_authorizers(method, path, data, headers)
return True
|
def forward_request(self, method, path, data, headers):
data = data and json.loads(to_str(data))
if re.match(PATH_REGEX_USER_REQUEST, path):
search_match = re.search(PATH_REGEX_USER_REQUEST, path)
api_id = search_match.group(1)
stage = search_match.group(2)
relative_path_w_query_params = "/%s" % search_match.group(3)
relative_path, query_string_params = extract_query_string_params(
path=relative_path_w_query_params
)
path_map = helpers.get_rest_api_paths(rest_api_id=api_id)
try:
extracted_path, resource = get_resource_for_path(
path=relative_path, path_map=path_map
)
except Exception:
return make_error("Unable to find path %s" % path, 404)
integrations = resource.get("resourceMethods", {})
integration = integrations.get(method, {})
if not integration:
integration = integrations.get("ANY", {})
integration = integration.get("methodIntegration")
if not integration:
if method == "OPTIONS" and "Origin" in headers:
# default to returning CORS headers if this is an OPTIONS request
return get_cors_response(headers)
return make_error("Unable to find integration for path %s" % path, 404)
uri = integration.get("uri")
if method == "POST" and integration["type"] == "AWS":
if uri.endswith("kinesis:action/PutRecords"):
template = integration["requestTemplates"][APPLICATION_JSON]
new_request = aws_stack.render_velocity_template(template, data)
# forward records to target kinesis stream
headers = aws_stack.mock_aws_request_headers(service="kinesis")
headers["X-Amz-Target"] = kinesis_listener.ACTION_PUT_RECORDS
result = common.make_http_request(
url=TEST_KINESIS_URL,
method="POST",
data=new_request,
headers=headers,
)
return result
elif uri.startswith("arn:aws:apigateway:") and ":sqs:path" in uri:
template = integration["requestTemplates"][APPLICATION_JSON]
account_id, queue = uri.split("/")[-2:]
region_name = uri.split(":")[3]
new_request = (
aws_stack.render_velocity_template(template, data)
+ "&QueueName=%s" % queue
)
headers = aws_stack.mock_aws_request_headers(
service="sqs", region_name=region_name
)
url = urljoin(
TEST_SQS_URL, "%s/%s?%s" % (account_id, queue, new_request)
)
result = common.make_http_request(url, method="GET", headers=headers)
return result
else:
msg = 'API Gateway action uri "%s" not yet implemented' % uri
LOGGER.warning(msg)
return make_error(msg, 404)
elif integration["type"] == "AWS_PROXY":
if uri.startswith("arn:aws:apigateway:") and ":lambda:path" in uri:
func_arn = (
uri.split(":lambda:path")[1]
.split("functions/")[1]
.split("/invocations")[0]
)
data_str = json.dumps(data) if isinstance(data, (dict, list)) else data
account_id = (
uri.split(":lambda:path")[1].split(":function:")[0].split(":")[-1]
)
source_ip = headers["X-Forwarded-For"].split(",")[-2]
# Sample request context:
# https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-create-api-as-simple-proxy-for-lambda.html#api-gateway-create-api-as-simple-proxy-for-lambda-test
request_context = {
"path": relative_path,
"accountId": account_id,
"resourceId": resource.get("id"),
"stage": stage,
"identity": {
"accountId": account_id,
"sourceIp": source_ip,
"userAgent": headers["User-Agent"],
},
}
try:
path_params = extract_path_params(
path=relative_path, extracted_path=extracted_path
)
except Exception:
path_params = {}
result = lambda_api.process_apigateway_invocation(
func_arn,
relative_path,
data_str,
headers,
path_params=path_params,
query_string_params=query_string_params,
method=method,
resource_path=path,
request_context=request_context,
)
if isinstance(result, FlaskResponse):
return flask_to_requests_response(result)
response = Response()
parsed_result = (
result if isinstance(result, dict) else json.loads(result)
)
parsed_result = common.json_safe(parsed_result)
response.status_code = int(parsed_result.get("statusCode", 200))
response.headers.update(parsed_result.get("headers", {}))
try:
if isinstance(parsed_result["body"], dict):
response._content = json.dumps(parsed_result["body"])
else:
response._content = parsed_result["body"]
except Exception:
response._content = "{}"
return response
else:
msg = 'API Gateway action uri "%s" not yet implemented' % uri
LOGGER.warning(msg)
return make_error(msg, 404)
elif integration["type"] == "HTTP":
function = getattr(requests, method.lower())
if isinstance(data, dict):
data = json.dumps(data)
result = function(integration["uri"], data=data, headers=headers)
return result
else:
msg = (
'API Gateway integration type "%s" for method "%s" not yet implemented'
% (integration["type"], method)
)
LOGGER.warning(msg)
return make_error(msg, 404)
return 200
if re.match(PATH_REGEX_AUTHORIZERS, path):
return handle_authorizers(method, path, data, headers)
return True
|
https://github.com/localstack/localstack/issues/438
|
2017-11-02T15:45:03:ERROR:localstack.services.generic_proxy: Error forwarding request: An error occurred (ValidationError) when calling the DescribeStackResources operation: Stack with id foo does not exist Traceback (most recent call last):
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 185, in forward
path=path, data=data, headers=forward_headers, response=response)
File "/opt/code/localstack/localstack/services/cloudformation/cloudformation_listener.py", line 181, in return_response
template_deployer.deploy_template(template, req_data.get('StackName')[0])
File "/opt/code/localstack/localstack/utils/cloudformation/template_deployer.py", line 484, in deploy_template
stack_resources = describe_stack_resources(stack_name, resource_id)
File "/opt/code/localstack/localstack/utils/cloudformation/template_deployer.py", line 218, in describe_stack_resources
resources = client.describe_stack_resources(StackName=stack_name, LogicalResourceId=logical_resource_id)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/client.py", line 310, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/client.py", line 599, in _make_api_call
raise error_class(parsed_response, operation_name)
ClientError: An error occurred (ValidationError) when calling the DescribeStackResources operation: Stack with id foo does not exist
|
ClientError
|
def apply_patches():
"""Apply patches to make LocalStack seamlessly interact with the moto backend.
TODO: Eventually, these patches should be contributed to the upstream repo!"""
# Patch S3Backend.get_key method in moto to use S3 API from LocalStack
def get_key(self, bucket_name, key_name, version_id=None):
s3_client = aws_stack.connect_to_service("s3")
value = b""
if bucket_name != BUCKET_MARKER_LOCAL:
value = s3_client.get_object(Bucket=bucket_name, Key=key_name)[
"Body"
].read()
return s3_models.FakeKey(name=key_name, value=value)
s3_models.S3Backend.get_key = get_key
# Patch clean_json in moto
def clean_json(resource_json, resources_map):
result = clean_json_orig(resource_json, resources_map)
if isinstance(result, BaseModel):
if isinstance(resource_json, dict) and "Ref" in resource_json:
types_with_ref_as_id_or_name = (
apigw_models.RestAPI,
apigw_models.Resource,
)
attr_candidates = ["function_arn", "id", "name"]
for attr in attr_candidates:
if hasattr(result, attr):
if attr in ["id", "name"] and not isinstance(
result, types_with_ref_as_id_or_name
):
LOG.warning(
'Unable to find ARN, using "%s" instead: %s - %s',
attr,
resource_json,
result,
)
return getattr(result, attr)
LOG.warning(
'Unable to resolve "Ref" attribute for: %s - %s - %s',
resource_json,
result,
type(result),
)
return result
clean_json_orig = parsing.clean_json
parsing.clean_json = clean_json
# add model mappings to moto
parsing.MODEL_MAP.update(MODEL_MAP)
# Patch parse_and_create_resource method in moto to deploy resources in LocalStack
def parse_and_create_resource(
logical_id, resource_json, resources_map, region_name
):
try:
return _parse_and_create_resource(
logical_id, resource_json, resources_map, region_name
)
except Exception as e:
LOG.error('Unable to parse and create resource "%s": %s' % (logical_id, e))
raise
def _parse_and_create_resource(
logical_id, resource_json, resources_map, region_name
):
stack_name = resources_map.get("AWS::StackName")
resource_hash_key = (stack_name, logical_id)
# If the current stack is being updated, avoid infinite recursion
updating = CURRENTLY_UPDATING_RESOURCES.get(resource_hash_key)
LOG.debug(
"Currently updating stack resource %s/%s: %s"
% (stack_name, logical_id, updating)
)
if updating:
return None
# parse and get final resource JSON
resource_tuple = parsing.parse_resource(
logical_id, resource_json, resources_map
)
if not resource_tuple:
return None
_, resource_json, _ = resource_tuple
# add some missing default props which otherwise cause deployments to fail
props = resource_json["Properties"] = resource_json.get("Properties") or {}
if resource_json["Type"] == "AWS::Lambda::EventSourceMapping" and not props.get(
"StartingPosition"
):
props["StartingPosition"] = "LATEST"
# check if this resource already exists in the resource map
resource = resources_map._parsed_resources.get(logical_id)
# check whether this resource needs to be deployed
resource_wrapped = {logical_id: resource_json}
should_be_created = template_deployer.should_be_deployed(
logical_id, resource_wrapped, stack_name
)
if not should_be_created:
# This resource is either not deployable or already exists. Check if it can be updated
if not template_deployer.is_updateable(
logical_id, resource_wrapped, stack_name
):
LOG.debug(
"Resource %s need not be deployed: %s" % (logical_id, resource_json)
)
if resource:
return resource
if not resource:
# create resource definition and store CloudFormation metadata in moto
resource = parse_and_create_resource_orig(
logical_id, resource_json, resources_map, region_name
)
# Fix for moto which sometimes hard-codes region name as 'us-east-1'
if hasattr(resource, "region_name") and resource.region_name != region_name:
LOG.debug(
"Updating incorrect region from %s to %s"
% (resource.region_name, region_name)
)
resource.region_name = region_name
# Apply some fixes/patches to the resource names, then deploy resource in LocalStack
update_resource_name(resource, resource_json)
LOG.debug("Deploying CloudFormation resource: %s" % resource_json)
try:
CURRENTLY_UPDATING_RESOURCES[resource_hash_key] = True
deploy_func = (
template_deployer.deploy_resource
if should_be_created
else template_deployer.update_resource
)
result = deploy_func(logical_id, resource_wrapped, stack_name=stack_name)
finally:
CURRENTLY_UPDATING_RESOURCES[resource_hash_key] = False
if not should_be_created:
# skip the parts below for update requests
return resource
def find_id(resource):
"""Find ID of the given resource."""
for id_attr in ("Id", "id", "ResourceId", "RestApiId", "DeploymentId"):
if id_attr in resource:
return resource[id_attr]
# update resource IDs to avoid mismatch between CF moto and LocalStack backend resources
if hasattr(resource, "id") or (
isinstance(resource, dict) and resource.get("id")
):
existing_id = resource.id if hasattr(resource, "id") else resource["id"]
new_res_id = find_id(result)
LOG.debug(
"Updating resource id: %s - %s, %s - %s"
% (existing_id, new_res_id, resource, resource_json)
)
if new_res_id:
LOG.info(
"Updating resource ID from %s to %s (%s)"
% (existing_id, new_res_id, region_name)
)
update_resource_id(resource, new_res_id, props, region_name)
else:
LOG.warning(
"Unable to extract id for resource %s: %s" % (logical_id, result)
)
# update physical_resource_id field
update_physical_resource_id(resource)
return resource
def update_resource_name(resource, resource_json):
"""Some resources require minor fixes in their CF resource definition
before we can pass them on to deployment."""
props = resource_json["Properties"] = resource_json.get("Properties") or {}
if isinstance(resource, sfn_models.StateMachine) and not props.get(
"StateMachineName"
):
props["StateMachineName"] = resource.name
def update_resource_id(resource, new_id, props, region_name):
"""Update and fix the ID(s) of the given resource."""
# NOTE: this is a bit of a hack, which is required because
# of the order of events when CloudFormation resources are created.
# When we process a request to create a CF resource that's part of a
# stack, say, an API Gateway Resource, then we (1) create the object
# in memory in moto, which generates a random ID for the resource, and
# (2) create the actual resource in the backend service using
# template_deployer.deploy_resource(..) (see above).
# The resource created in (2) now has a different ID than the resource
# created in (1), which leads to downstream problems. Hence, we need
# the logic below to reconcile the ids, i.e., apply IDs from (2) to (1).
backend = apigw_models.apigateway_backends[region_name]
if isinstance(resource, apigw_models.RestAPI):
backend.apis.pop(resource.id, None)
backend.apis[new_id] = resource
# We also need to fetch the resources to replace the root resource
# that moto automatically adds to newly created RestAPI objects
client = aws_stack.connect_to_service("apigateway")
resources = client.get_resources(restApiId=new_id, limit=500)["items"]
# make sure no resources have been added in addition to the root /
assert len(resource.resources) == 1
resource.resources = {}
for res in resources:
res_path_part = res.get("pathPart") or res.get("path")
child = resource.add_child(res_path_part, res.get("parentId"))
resource.resources.pop(child.id)
child.id = res["id"]
child.api_id = new_id
resource.resources[child.id] = child
resource.id = new_id
elif isinstance(resource, apigw_models.Resource):
api_id = props["RestApiId"]
backend.apis[api_id].resources.pop(resource.id, None)
backend.apis[api_id].resources[new_id] = resource
resource.id = new_id
elif isinstance(resource, apigw_models.Deployment):
api_id = props["RestApiId"]
backend.apis[api_id].deployments.pop(resource["id"], None)
backend.apis[api_id].deployments[new_id] = resource
resource["id"] = new_id
else:
LOG.warning(
"Unexpected resource type when updating ID: %s" % type(resource)
)
def update_physical_resource_id(resource):
phys_res_id = (
getattr(resource, "physical_resource_id")
if hasattr(resource, "physical_resource_id")
else None
)
if not phys_res_id:
if isinstance(resource, lambda_models.LambdaFunction):
func_arn = aws_stack.lambda_function_arn(resource.function_name)
resource.function_arn = resource.physical_resource_id = func_arn
elif isinstance(resource, sfn_models.StateMachine):
sm_arn = aws_stack.state_machine_arn(resource.name)
resource.physical_resource_id = sm_arn
elif isinstance(resource, service_models.StepFunctionsActivity):
act_arn = aws_stack.stepfunctions_activity_arn(
resource.params.get("Name")
)
resource.physical_resource_id = act_arn
else:
LOG.warning(
"Unable to determine physical_resource_id for resource %s"
% type(resource)
)
parse_and_create_resource_orig = parsing.parse_and_create_resource
parsing.parse_and_create_resource = parse_and_create_resource
# Patch CloudFormation parse_output(..) method to fix a bug in moto
def parse_output(output_logical_id, output_json, resources_map):
try:
return parse_output_orig(output_logical_id, output_json, resources_map)
except KeyError:
output = Output()
output.key = output_logical_id
output.value = None
output.description = output_json.get("Description")
return output
parse_output_orig = parsing.parse_output
parsing.parse_output = parse_output
# Patch DynamoDB get_cfn_attribute(..) method in moto
def DynamoDB_Table_get_cfn_attribute(self, attribute_name):
try:
return DynamoDB_Table_get_cfn_attribute_orig(self, attribute_name)
except Exception:
if attribute_name == "Arn":
return aws_stack.dynamodb_table_arn(table_name=self.name)
raise
DynamoDB_Table_get_cfn_attribute_orig = dynamodb_models.Table.get_cfn_attribute
dynamodb_models.Table.get_cfn_attribute = DynamoDB_Table_get_cfn_attribute
# Patch DynamoDB get_cfn_attribute(..) method in moto
def DynamoDB2_Table_get_cfn_attribute(self, attribute_name):
if attribute_name == "Arn":
return aws_stack.dynamodb_table_arn(table_name=self.name)
dynamodb2_models.Table.get_cfn_attribute = DynamoDB2_Table_get_cfn_attribute
# Patch SQS get_cfn_attribute(..) method in moto
def SQS_Queue_get_cfn_attribute(self, attribute_name):
if attribute_name == "Arn":
return aws_stack.sqs_queue_arn(queue_name=self.name)
return SQS_Queue_get_cfn_attribute_orig(self, attribute_name)
SQS_Queue_get_cfn_attribute_orig = sqs_models.Queue.get_cfn_attribute
sqs_models.Queue.get_cfn_attribute = SQS_Queue_get_cfn_attribute
# Patch Lambda get_cfn_attribute(..) method in moto
def Lambda_Function_get_cfn_attribute(self, attribute_name):
try:
if attribute_name == "Arn":
return self.function_arn
return Lambda_Function_get_cfn_attribute_orig(self, attribute_name)
except Exception:
if attribute_name in ("Name", "FunctionName"):
return self.function_name
raise
Lambda_Function_get_cfn_attribute_orig = (
lambda_models.LambdaFunction.get_cfn_attribute
)
lambda_models.LambdaFunction.get_cfn_attribute = Lambda_Function_get_cfn_attribute
# Patch DynamoDB get_cfn_attribute(..) method in moto
def DynamoDB_Table_get_cfn_attribute(self, attribute_name):
try:
if attribute_name == "StreamArn":
streams = aws_stack.connect_to_service("dynamodbstreams").list_streams(
TableName=self.name
)["Streams"]
return streams[0]["StreamArn"] if streams else None
return DynamoDB_Table_get_cfn_attribute_orig(self, attribute_name)
except Exception as e:
LOG.warning(
'Unable to get attribute "%s" from resource %s: %s'
% (attribute_name, type(self), e)
)
raise
DynamoDB_Table_get_cfn_attribute_orig = dynamodb_models.Table.get_cfn_attribute
dynamodb_models.Table.get_cfn_attribute = DynamoDB_Table_get_cfn_attribute
# Patch IAM get_cfn_attribute(..) method in moto
def IAM_Role_get_cfn_attribute(self, attribute_name):
try:
return IAM_Role_get_cfn_attribute_orig(self, attribute_name)
except Exception:
if attribute_name == "Arn":
return aws_stack.role_arn(self.name)
raise
IAM_Role_get_cfn_attribute_orig = iam_models.Role.get_cfn_attribute
iam_models.Role.get_cfn_attribute = IAM_Role_get_cfn_attribute
# Patch LambdaFunction create_from_cloudformation_json(..) method in moto
@classmethod
def Lambda_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
resource_name = (
cloudformation_json.get("Properties", {}).get("FunctionName")
or resource_name
)
return Lambda_create_from_cloudformation_json_orig(
resource_name, cloudformation_json, region_name
)
Lambda_create_from_cloudformation_json_orig = (
lambda_models.LambdaFunction.create_from_cloudformation_json
)
lambda_models.LambdaFunction.create_from_cloudformation_json = (
Lambda_create_from_cloudformation_json
)
# add CloudWatch types
parsing.MODEL_MAP["AWS::ApiGateway::Deployment"] = apigw_models.Deployment
parsing.MODEL_MAP["AWS::ApiGateway::Method"] = apigw_models.Method
parsing.MODEL_MAP["AWS::ApiGateway::Resource"] = apigw_models.Resource
parsing.MODEL_MAP["AWS::ApiGateway::RestApi"] = apigw_models.RestAPI
parsing.MODEL_MAP["AWS::StepFunctions::StateMachine"] = sfn_models.StateMachine
@classmethod
def RestAPI_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
name = props["Name"]
region_name = props.get("Region") or DEFAULT_REGION
description = props.get("Description") or ""
id = props.get("Id") or short_uid()
return apigw_models.RestAPI(id, region_name, name, description)
def RestAPI_get_cfn_attribute(self, attribute_name):
if attribute_name == "Id":
return self.id
if attribute_name == "Region":
return self.region_name
if attribute_name == "Name":
return self.name
if attribute_name == "Description":
return self.description
if attribute_name == "RootResourceId":
for id, resource in self.resources.items():
if resource.parent_id is None:
return resource.id
return None
raise UnformattedGetAttTemplateException()
@classmethod
def Deployment_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
name = props["StageName"]
deployment_id = props.get("Id") or short_uid()
description = props.get("Description") or ""
return apigw_models.Deployment(deployment_id, name, description)
@classmethod
def Resource_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
region_name = props.get("Region") or DEFAULT_REGION
path_part = props.get("PathPart")
api_id = props.get("RestApiId")
parent_id = props.get("ParentId")
id = props.get("Id") or short_uid()
return apigw_models.Resource(id, region_name, api_id, path_part, parent_id)
@classmethod
def Method_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
method_type = props.get("HttpMethod")
authorization_type = props.get("AuthorizationType")
return apigw_models.Method(method_type, authorization_type)
apigw_models.RestAPI.create_from_cloudformation_json = (
RestAPI_create_from_cloudformation_json
)
apigw_models.RestAPI.get_cfn_attribute = RestAPI_get_cfn_attribute
apigw_models.Deployment.create_from_cloudformation_json = (
Deployment_create_from_cloudformation_json
)
apigw_models.Resource.create_from_cloudformation_json = (
Resource_create_from_cloudformation_json
)
apigw_models.Method.create_from_cloudformation_json = (
Method_create_from_cloudformation_json
)
# TODO: add support for AWS::ApiGateway::Model, AWS::ApiGateway::RequestValidator, ...
# fix AttributeError in moto's CloudFormation describe_stack_resource
def describe_stack_resource(self):
stack_name = self._get_param("StackName")
stack = self.cloudformation_backend.get_stack(stack_name)
logical_resource_id = self._get_param("LogicalResourceId")
for stack_resource in stack.stack_resources:
# Note: Line below has been patched
# if stack_resource.logical_resource_id == logical_resource_id:
if (
stack_resource
and stack_resource.logical_resource_id == logical_resource_id
):
resource = stack_resource
break
else:
raise ValidationError(logical_resource_id)
template = self.response_template(
responses.DESCRIBE_STACK_RESOURCE_RESPONSE_TEMPLATE
)
return template.render(stack=stack, resource=resource)
responses.CloudFormationResponse.describe_stack_resource = describe_stack_resource
|
def apply_patches():
"""Apply patches to make LocalStack seamlessly interact with the moto backend.
TODO: Eventually, these patches should be contributed to the upstream repo!"""
# Patch S3Backend.get_key method in moto to use S3 API from LocalStack
def get_key(self, bucket_name, key_name, version_id=None):
s3_client = aws_stack.connect_to_service("s3")
value = b""
if bucket_name != BUCKET_MARKER_LOCAL:
value = s3_client.get_object(Bucket=bucket_name, Key=key_name)[
"Body"
].read()
return s3_models.FakeKey(name=key_name, value=value)
s3_models.S3Backend.get_key = get_key
# Patch clean_json in moto
def clean_json(resource_json, resources_map):
result = clean_json_orig(resource_json, resources_map)
if isinstance(result, BaseModel):
if isinstance(resource_json, dict) and "Ref" in resource_json:
types_with_ref_as_id_or_name = (
apigw_models.RestAPI,
apigw_models.Resource,
)
attr_candidates = ["function_arn", "id", "name"]
for attr in attr_candidates:
if hasattr(result, attr):
if attr in ["id", "name"] and not isinstance(
result, types_with_ref_as_id_or_name
):
LOG.warning(
'Unable to find ARN, using "%s" instead: %s - %s',
attr,
resource_json,
result,
)
return getattr(result, attr)
LOG.warning(
'Unable to resolve "Ref" attribute for: %s - %s - %s',
resource_json,
result,
type(result),
)
return result
clean_json_orig = parsing.clean_json
parsing.clean_json = clean_json
# add model mappings to moto
parsing.MODEL_MAP.update(MODEL_MAP)
# Patch parse_and_create_resource method in moto to deploy resources in LocalStack
def parse_and_create_resource(
logical_id, resource_json, resources_map, region_name
):
try:
return _parse_and_create_resource(
logical_id, resource_json, resources_map, region_name
)
except Exception as e:
LOG.error('Unable to parse and create resource "%s": %s' % (logical_id, e))
raise
def _parse_and_create_resource(
logical_id, resource_json, resources_map, region_name
):
stack_name = resources_map.get("AWS::StackName")
resource_hash_key = (stack_name, logical_id)
# If the current stack is being updated, avoid infinite recursion
updating = CURRENTLY_UPDATING_RESOURCES.get(resource_hash_key)
LOG.debug(
"Currently updating stack resource %s/%s: %s"
% (stack_name, logical_id, updating)
)
if updating:
return None
# parse and get final resource JSON
resource_tuple = parsing.parse_resource(
logical_id, resource_json, resources_map
)
if not resource_tuple:
return None
_, resource_json, _ = resource_tuple
# add some missing default props which otherwise cause deployments to fail
props = resource_json["Properties"] = resource_json.get("Properties") or {}
if resource_json["Type"] == "AWS::Lambda::EventSourceMapping" and not props.get(
"StartingPosition"
):
props["StartingPosition"] = "LATEST"
# check if this resource already exists in the resource map
resource = resources_map._parsed_resources.get(logical_id)
# check whether this resource needs to be deployed
resource_wrapped = {logical_id: resource_json}
should_be_created = template_deployer.should_be_deployed(
logical_id, resource_wrapped, stack_name
)
if not should_be_created:
# This resource is either not deployable or already exists. Check if it can be updated
if not template_deployer.is_updateable(
logical_id, resource_wrapped, stack_name
):
LOG.debug(
"Resource %s need not be deployed: %s" % (logical_id, resource_json)
)
if resource:
return resource
if not resource:
# create resource definition and store CloudFormation metadata in moto
resource = parse_and_create_resource_orig(
logical_id, resource_json, resources_map, region_name
)
# Fix for moto which sometimes hard-codes region name as 'us-east-1'
if hasattr(resource, "region_name") and resource.region_name != region_name:
LOG.debug(
"Updating incorrect region from %s to %s"
% (resource.region_name, region_name)
)
resource.region_name = region_name
# Apply some fixes/patches to the resource names, then deploy resource in LocalStack
update_resource_name(resource, resource_json)
LOG.debug("Deploying CloudFormation resource: %s" % resource_json)
try:
CURRENTLY_UPDATING_RESOURCES[resource_hash_key] = True
deploy_func = (
template_deployer.deploy_resource
if should_be_created
else template_deployer.update_resource
)
result = deploy_func(logical_id, resource_wrapped, stack_name=stack_name)
finally:
CURRENTLY_UPDATING_RESOURCES[resource_hash_key] = False
if not should_be_created:
# skip the parts below for update requests
return resource
def find_id(resource):
"""Find ID of the given resource."""
for id_attr in ("Id", "id", "ResourceId", "RestApiId", "DeploymentId"):
if id_attr in resource:
return resource[id_attr]
# update resource IDs to avoid mismatch between CF moto and LocalStack backend resources
if hasattr(resource, "id") or (
isinstance(resource, dict) and resource.get("id")
):
existing_id = resource.id if hasattr(resource, "id") else resource["id"]
new_res_id = find_id(result)
LOG.debug(
"Updating resource id: %s - %s, %s - %s"
% (existing_id, new_res_id, resource, resource_json)
)
if new_res_id:
LOG.info(
"Updating resource ID from %s to %s (%s)"
% (existing_id, new_res_id, region_name)
)
update_resource_id(resource, new_res_id, props, region_name)
else:
LOG.warning(
"Unable to extract id for resource %s: %s" % (logical_id, result)
)
# update physical_resource_id field
update_physical_resource_id(resource)
return resource
def update_resource_name(resource, resource_json):
"""Some resources require minor fixes in their CF resource definition
before we can pass them on to deployment."""
props = resource_json["Properties"] = resource_json.get("Properties") or {}
if isinstance(resource, sfn_models.StateMachine) and not props.get(
"StateMachineName"
):
props["StateMachineName"] = resource.name
def update_resource_id(resource, new_id, props, region_name):
"""Update and fix the ID(s) of the given resource."""
# NOTE: this is a bit of a hack, which is required because
# of the order of events when CloudFormation resources are created.
# When we process a request to create a CF resource that's part of a
# stack, say, an API Gateway Resource, then we (1) create the object
# in memory in moto, which generates a random ID for the resource, and
# (2) create the actual resource in the backend service using
# template_deployer.deploy_resource(..) (see above).
# The resource created in (2) now has a different ID than the resource
# created in (1), which leads to downstream problems. Hence, we need
# the logic below to reconcile the ids, i.e., apply IDs from (2) to (1).
backend = apigw_models.apigateway_backends[region_name]
if isinstance(resource, apigw_models.RestAPI):
backend.apis.pop(resource.id, None)
backend.apis[new_id] = resource
# We also need to fetch the resources to replace the root resource
# that moto automatically adds to newly created RestAPI objects
client = aws_stack.connect_to_service("apigateway")
resources = client.get_resources(restApiId=new_id, limit=500)["items"]
# make sure no resources have been added in addition to the root /
assert len(resource.resources) == 1
resource.resources = {}
for res in resources:
res_path_part = res.get("pathPart") or res.get("path")
child = resource.add_child(res_path_part, res.get("parentId"))
resource.resources.pop(child.id)
child.id = res["id"]
child.api_id = new_id
resource.resources[child.id] = child
resource.id = new_id
elif isinstance(resource, apigw_models.Resource):
api_id = props["RestApiId"]
backend.apis[api_id].resources.pop(resource.id, None)
backend.apis[api_id].resources[new_id] = resource
resource.id = new_id
elif isinstance(resource, apigw_models.Deployment):
api_id = props["RestApiId"]
backend.apis[api_id].deployments.pop(resource["id"], None)
backend.apis[api_id].deployments[new_id] = resource
resource["id"] = new_id
else:
LOG.warning(
"Unexpected resource type when updating ID: %s" % type(resource)
)
def update_physical_resource_id(resource):
phys_res_id = (
getattr(resource, "physical_resource_id")
if hasattr(resource, "physical_resource_id")
else None
)
if not phys_res_id:
if isinstance(resource, lambda_models.LambdaFunction):
func_arn = aws_stack.lambda_function_arn(resource.function_name)
resource.function_arn = resource.physical_resource_id = func_arn
elif isinstance(resource, sfn_models.StateMachine):
sm_arn = aws_stack.state_machine_arn(resource.name)
resource.physical_resource_id = sm_arn
elif isinstance(resource, service_models.StepFunctionsActivity):
act_arn = aws_stack.stepfunctions_activity_arn(
resource.params.get("Name")
)
resource.physical_resource_id = act_arn
else:
LOG.warning(
"Unable to determine physical_resource_id for resource %s"
% type(resource)
)
parse_and_create_resource_orig = parsing.parse_and_create_resource
parsing.parse_and_create_resource = parse_and_create_resource
# Patch CloudFormation parse_output(..) method to fix a bug in moto
def parse_output(output_logical_id, output_json, resources_map):
try:
return parse_output_orig(output_logical_id, output_json, resources_map)
except KeyError:
output = Output()
output.key = output_logical_id
output.value = None
output.description = output_json.get("Description")
return output
parse_output_orig = parsing.parse_output
parsing.parse_output = parse_output
# Patch DynamoDB get_cfn_attribute(..) method in moto
def DynamoDB_Table_get_cfn_attribute(self, attribute_name):
try:
return DynamoDB_Table_get_cfn_attribute_orig(self, attribute_name)
except Exception:
if attribute_name == "Arn":
return aws_stack.dynamodb_table_arn(table_name=self.name)
raise
DynamoDB_Table_get_cfn_attribute_orig = dynamodb_models.Table.get_cfn_attribute
dynamodb_models.Table.get_cfn_attribute = DynamoDB_Table_get_cfn_attribute
# Patch SQS get_cfn_attribute(..) method in moto
def SQS_Queue_get_cfn_attribute(self, attribute_name):
if attribute_name == "Arn":
return aws_stack.sqs_queue_arn(queue_name=self.name)
return SQS_Queue_get_cfn_attribute_orig(self, attribute_name)
SQS_Queue_get_cfn_attribute_orig = sqs_models.Queue.get_cfn_attribute
sqs_models.Queue.get_cfn_attribute = SQS_Queue_get_cfn_attribute
# Patch Lambda get_cfn_attribute(..) method in moto
def Lambda_Function_get_cfn_attribute(self, attribute_name):
try:
if attribute_name == "Arn":
return self.function_arn
return Lambda_Function_get_cfn_attribute_orig(self, attribute_name)
except Exception:
if attribute_name in ("Name", "FunctionName"):
return self.function_name
raise
Lambda_Function_get_cfn_attribute_orig = (
lambda_models.LambdaFunction.get_cfn_attribute
)
lambda_models.LambdaFunction.get_cfn_attribute = Lambda_Function_get_cfn_attribute
# Patch DynamoDB get_cfn_attribute(..) method in moto
def DynamoDB_Table_get_cfn_attribute(self, attribute_name):
try:
if attribute_name == "StreamArn":
streams = aws_stack.connect_to_service("dynamodbstreams").list_streams(
TableName=self.name
)["Streams"]
return streams[0]["StreamArn"] if streams else None
return DynamoDB_Table_get_cfn_attribute_orig(self, attribute_name)
except Exception as e:
LOG.warning(
'Unable to get attribute "%s" from resource %s: %s'
% (attribute_name, type(self), e)
)
raise
DynamoDB_Table_get_cfn_attribute_orig = dynamodb_models.Table.get_cfn_attribute
dynamodb_models.Table.get_cfn_attribute = DynamoDB_Table_get_cfn_attribute
# Patch IAM get_cfn_attribute(..) method in moto
def IAM_Role_get_cfn_attribute(self, attribute_name):
try:
return IAM_Role_get_cfn_attribute_orig(self, attribute_name)
except Exception:
if attribute_name == "Arn":
return aws_stack.role_arn(self.name)
raise
IAM_Role_get_cfn_attribute_orig = iam_models.Role.get_cfn_attribute
iam_models.Role.get_cfn_attribute = IAM_Role_get_cfn_attribute
# Patch LambdaFunction create_from_cloudformation_json(..) method in moto
@classmethod
def Lambda_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
resource_name = (
cloudformation_json.get("Properties", {}).get("FunctionName")
or resource_name
)
return Lambda_create_from_cloudformation_json_orig(
resource_name, cloudformation_json, region_name
)
Lambda_create_from_cloudformation_json_orig = (
lambda_models.LambdaFunction.create_from_cloudformation_json
)
lambda_models.LambdaFunction.create_from_cloudformation_json = (
Lambda_create_from_cloudformation_json
)
# add CloudWatch types
parsing.MODEL_MAP["AWS::ApiGateway::Deployment"] = apigw_models.Deployment
parsing.MODEL_MAP["AWS::ApiGateway::Method"] = apigw_models.Method
parsing.MODEL_MAP["AWS::ApiGateway::Resource"] = apigw_models.Resource
parsing.MODEL_MAP["AWS::ApiGateway::RestApi"] = apigw_models.RestAPI
parsing.MODEL_MAP["AWS::StepFunctions::StateMachine"] = sfn_models.StateMachine
@classmethod
def RestAPI_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
name = props["Name"]
region_name = props.get("Region") or DEFAULT_REGION
description = props.get("Description") or ""
id = props.get("Id") or short_uid()
return apigw_models.RestAPI(id, region_name, name, description)
def RestAPI_get_cfn_attribute(self, attribute_name):
if attribute_name == "Id":
return self.id
if attribute_name == "Region":
return self.region_name
if attribute_name == "Name":
return self.name
if attribute_name == "Description":
return self.description
if attribute_name == "RootResourceId":
for id, resource in self.resources.items():
if resource.parent_id is None:
return resource.id
return None
raise UnformattedGetAttTemplateException()
@classmethod
def Deployment_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
name = props["StageName"]
deployment_id = props.get("Id") or short_uid()
description = props.get("Description") or ""
return apigw_models.Deployment(deployment_id, name, description)
@classmethod
def Resource_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
region_name = props.get("Region") or DEFAULT_REGION
path_part = props.get("PathPart")
api_id = props.get("RestApiId")
parent_id = props.get("ParentId")
id = props.get("Id") or short_uid()
return apigw_models.Resource(id, region_name, api_id, path_part, parent_id)
@classmethod
def Method_create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
props = cloudformation_json["Properties"]
method_type = props.get("HttpMethod")
authorization_type = props.get("AuthorizationType")
return apigw_models.Method(method_type, authorization_type)
apigw_models.RestAPI.create_from_cloudformation_json = (
RestAPI_create_from_cloudformation_json
)
apigw_models.RestAPI.get_cfn_attribute = RestAPI_get_cfn_attribute
apigw_models.Deployment.create_from_cloudformation_json = (
Deployment_create_from_cloudformation_json
)
apigw_models.Resource.create_from_cloudformation_json = (
Resource_create_from_cloudformation_json
)
apigw_models.Method.create_from_cloudformation_json = (
Method_create_from_cloudformation_json
)
# TODO: add support for AWS::ApiGateway::Model, AWS::ApiGateway::RequestValidator, ...
# fix AttributeError in moto's CloudFormation describe_stack_resource
def describe_stack_resource(self):
stack_name = self._get_param("StackName")
stack = self.cloudformation_backend.get_stack(stack_name)
logical_resource_id = self._get_param("LogicalResourceId")
for stack_resource in stack.stack_resources:
# Note: Line below has been patched
# if stack_resource.logical_resource_id == logical_resource_id:
if (
stack_resource
and stack_resource.logical_resource_id == logical_resource_id
):
resource = stack_resource
break
else:
raise ValidationError(logical_resource_id)
template = self.response_template(
responses.DESCRIBE_STACK_RESOURCE_RESPONSE_TEMPLATE
)
return template.render(stack=stack, resource=resource)
responses.CloudFormationResponse.describe_stack_resource = describe_stack_resource
|
https://github.com/localstack/localstack/issues/438
|
2017-11-02T15:45:03:ERROR:localstack.services.generic_proxy: Error forwarding request: An error occurred (ValidationError) when calling the DescribeStackResources operation: Stack with id foo does not exist Traceback (most recent call last):
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 185, in forward
path=path, data=data, headers=forward_headers, response=response)
File "/opt/code/localstack/localstack/services/cloudformation/cloudformation_listener.py", line 181, in return_response
template_deployer.deploy_template(template, req_data.get('StackName')[0])
File "/opt/code/localstack/localstack/utils/cloudformation/template_deployer.py", line 484, in deploy_template
stack_resources = describe_stack_resources(stack_name, resource_id)
File "/opt/code/localstack/localstack/utils/cloudformation/template_deployer.py", line 218, in describe_stack_resources
resources = client.describe_stack_resources(StackName=stack_name, LogicalResourceId=logical_resource_id)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/client.py", line 310, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/botocore/client.py", line 599, in _make_api_call
raise error_class(parsed_response, operation_name)
ClientError: An error occurred (ValidationError) when calling the DescribeStackResources operation: Stack with id foo does not exist
|
ClientError
|
def return_response(self, method, path, data, headers, response):
action = headers.get("X-Amz-Target")
data = json.loads(to_str(data))
records = []
if action in (ACTION_CREATE_STREAM, ACTION_DELETE_STREAM):
event_type = (
event_publisher.EVENT_KINESIS_CREATE_STREAM
if action == ACTION_CREATE_STREAM
else event_publisher.EVENT_KINESIS_DELETE_STREAM
)
payload = {"n": event_publisher.get_hash(data.get("StreamName"))}
if action == ACTION_CREATE_STREAM:
payload["s"] = data.get("ShardCount")
event_publisher.fire_event(event_type, payload=payload)
elif action == ACTION_PUT_RECORD:
response_body = json.loads(to_str(response.content))
event_record = {
"data": data["Data"],
"partitionKey": data["PartitionKey"],
"sequenceNumber": response_body.get("SequenceNumber"),
}
event_records = [event_record]
stream_name = data["StreamName"]
lambda_api.process_kinesis_records(event_records, stream_name)
elif action == ACTION_PUT_RECORDS:
event_records = []
response_body = json.loads(to_str(response.content))
if "Records" in response_body:
response_records = response_body["Records"]
records = data["Records"]
for index in range(0, len(records)):
record = records[index]
event_record = {
"data": record["Data"],
"partitionKey": record["PartitionKey"],
"sequenceNumber": response_records[index].get("SequenceNumber"),
}
event_records.append(event_record)
stream_name = data["StreamName"]
lambda_api.process_kinesis_records(event_records, stream_name)
elif action == ACTION_UPDATE_SHARD_COUNT:
# Currently kinesalite, which backs the Kinesis implementation for localstack, does
# not support UpdateShardCount:
# https://github.com/mhart/kinesalite/issues/61
#
# [Terraform](https://www.terraform.io) makes the call to UpdateShardCount when it
# applies Kinesis resources. A Terraform run fails when this is not present.
#
# The code that follows just returns a successful response, bypassing the 400
# response that kinesalite returns.
#
response = Response()
response.status_code = 200
content = {
"CurrentShardCount": 1,
"StreamName": data["StreamName"],
"TargetShardCount": data["TargetShardCount"],
}
response.encoding = "UTF-8"
response._content = json.dumps(content)
return response
|
def return_response(self, method, path, data, headers, response):
action = headers.get("X-Amz-Target")
data = json.loads(to_str(data))
records = []
if action in (ACTION_CREATE_STREAM, ACTION_DELETE_STREAM):
event_type = (
event_publisher.EVENT_KINESIS_CREATE_STREAM
if action == ACTION_CREATE_STREAM
else event_publisher.EVENT_KINESIS_DELETE_STREAM
)
payload = {"n": event_publisher.get_hash(data.get("StreamName"))}
if action == ACTION_CREATE_STREAM:
payload["s"] = data.get("ShardCount")
event_publisher.fire_event(event_type, payload=payload)
elif action == ACTION_PUT_RECORD:
response_body = json.loads(to_str(response.content))
event_record = {
"data": data["Data"],
"partitionKey": data["PartitionKey"],
"sequenceNumber": response_body.get("SequenceNumber"),
}
event_records = [event_record]
stream_name = data["StreamName"]
lambda_api.process_kinesis_records(event_records, stream_name)
elif action == ACTION_PUT_RECORDS:
event_records = []
response_body = json.loads(to_str(response.content))
response_records = response_body["Records"]
records = data["Records"]
for index in range(0, len(records)):
record = records[index]
event_record = {
"data": record["Data"],
"partitionKey": record["PartitionKey"],
"sequenceNumber": response_records[index].get("SequenceNumber"),
}
event_records.append(event_record)
stream_name = data["StreamName"]
lambda_api.process_kinesis_records(event_records, stream_name)
elif action == ACTION_UPDATE_SHARD_COUNT:
# Currently kinesalite, which backs the Kinesis implementation for localstack, does
# not support UpdateShardCount:
# https://github.com/mhart/kinesalite/issues/61
#
# [Terraform](https://www.terraform.io) makes the call to UpdateShardCount when it
# applies Kinesis resources. A Terraform run fails when this is not present.
#
# The code that follows just returns a successful response, bypassing the 400
# response that kinesalite returns.
#
response = Response()
response.status_code = 200
content = {
"CurrentShardCount": 1,
"StreamName": data["StreamName"],
"TargetShardCount": data["TargetShardCount"],
}
response.encoding = "UTF-8"
response._content = json.dumps(content)
return response
|
https://github.com/localstack/localstack/issues/753
|
Starting mock Kinesis (http port 4568)...
Starting mock S3 (http port 4572)...
Starting mock Firehose service (http port 4573)...
Starting mock Lambda service (http port 4574)...
Listening at http://:::4565
* Running on http://0.0.0.0:4563/ (Press CTRL+C to quit)
127.0.0.1 - - [08/May/2018 13:52:25] "GET / HTTP/1.1" 200 -
Ready.
r127.0.0.1 - - [08/May/2018 13:56:43] "PUT /prd1541-qa1-vf-sms-send-stream-archive HTTP/1.1" 200 -
127.0.0.1 - - [08/May/2018 13:56:43] "HEAD /prd1541-qa1-vf-sms-send-stream-archive HTTP/1.1" 200 -
2018-05-08T13:56:43:ERROR:localstack.services.generic_proxy: Error forwarding request: 'Records' Traceback (most recent call last):
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 215, in forward
updated_response = self.proxy.update_listener.return_response(**kwargs)
File "/opt/code/localstack/localstack/services/kinesis/kinesis_listener.py", line 49, in return_response
response_records = response_body['Records']
KeyError: 'Records'
2018-05-08T13:56:43:ERROR:localstack.services.generic_proxy: Error forwarding request: 'Records' Traceback (most recent call last):
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 215, in forward
updated_response = self.proxy.update_listener.return_response(**kwargs)
File "/opt/code/localstack/localstack/services/kinesis/kinesis_listener.py", line 49, in return_response
response_records = response_body['Records']
KeyError: 'Records'
2018-05-08T13:56:43:ERROR:localstack.services.generic_proxy: Error forwarding request: 'Records' Traceback (most recent call last):
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 215, in forward
updated_response = self.proxy.update_listener.return_response(**kwargs)
File "/opt/code/localstack/localstack/services/kinesis/kinesis_listener.py", line 49, in return_response
response_records = response_body['Records']
KeyError: 'Records'
|
KeyError
|
def forward(self, method):
path = self.path
if "://" in path:
path = "/" + path.split("://", 1)[1].split("/", 1)[1]
proxy_url = "%s%s" % (self.proxy.forward_url, path)
target_url = self.path
if "://" not in target_url:
target_url = "%s%s" % (self.proxy.forward_url, target_url)
data = self.data_bytes
forward_headers = CaseInsensitiveDict(self.headers)
# update original "Host" header (moto s3 relies on this behavior)
if not forward_headers.get("Host"):
forward_headers["host"] = urlparse(target_url).netloc
if "localhost.atlassian.io" in forward_headers.get("Host"):
forward_headers["host"] = "localhost"
try:
response = None
modified_request = None
# update listener (pre-invocation)
if self.proxy.update_listener:
listener_result = self.proxy.update_listener.forward_request(
method=method, path=path, data=data, headers=forward_headers
)
if isinstance(listener_result, Response):
response = listener_result
elif isinstance(listener_result, Request):
modified_request = listener_result
data = modified_request.data
forward_headers = modified_request.headers
elif listener_result is not True:
# get status code from response, or use Bad Gateway status code
code = listener_result if isinstance(listener_result, int) else 503
self.send_response(code)
self.end_headers()
return
# perform the actual invocation of the backend service
if response is None:
if modified_request:
response = self.method(
proxy_url,
data=modified_request.data,
headers=modified_request.headers,
)
else:
response = self.method(
proxy_url, data=self.data_bytes, headers=forward_headers
)
# update listener (post-invocation)
if self.proxy.update_listener:
kwargs = {
"method": method,
"path": path,
"data": data,
"headers": forward_headers,
"response": response,
}
if (
"request_handler"
in inspect.getargspec(self.proxy.update_listener.return_response)[0]
):
# some listeners (e.g., sqs_listener.py) require additional details like the original
# request port, hence we pass in a reference to this request handler as well.
kwargs["request_handler"] = self
updated_response = self.proxy.update_listener.return_response(**kwargs)
if isinstance(updated_response, Response):
response = updated_response
# copy headers and return response
self.send_response(response.status_code)
content_length_sent = False
for header_key, header_value in iteritems(response.headers):
# filter out certain headers that we don't want to transmit
if header_key.lower() not in ("transfer-encoding", "date", "server"):
self.send_header(header_key, header_value)
content_length_sent = (
content_length_sent or header_key.lower() == "content-length"
)
if not content_length_sent:
self.send_header(
"Content-Length",
"%s" % len(response.content) if response.content else 0,
)
# allow pre-flight CORS headers by default
if "Access-Control-Allow-Origin" not in response.headers:
self.send_header("Access-Control-Allow-Origin", "*")
if "Access-Control-Allow-Methods" not in response.headers:
self.send_header(
"Access-Control-Allow-Methods", ",".join(CORS_ALLOWED_METHODS)
)
if "Access-Control-Allow-Headers" not in response.headers:
self.send_header(
"Access-Control-Allow-Headers", ",".join(CORS_ALLOWED_HEADERS)
)
self.end_headers()
if response.content and len(response.content):
self.wfile.write(to_bytes(response.content))
self.wfile.flush()
except Exception as e:
trace = str(traceback.format_exc())
conn_errors = ("ConnectionRefusedError", "NewConnectionError")
conn_error = any(e in trace for e in conn_errors)
error_msg = "Error forwarding request: %s %s" % (e, trace)
if "Broken pipe" in trace:
LOGGER.warn("Connection prematurely closed by client (broken pipe).")
elif not self.proxy.quiet or not conn_error:
LOGGER.error(error_msg)
if os.environ.get(ENV_INTERNAL_TEST_RUN):
# During a test run, we also want to print error messages, because
# log messages are delayed until the entire test run is over, and
# hence we are missing messages if the test hangs for some reason.
print("ERROR: %s" % error_msg)
self.send_response(502) # bad gateway
self.end_headers()
|
def forward(self, method):
path = self.path
if "://" in path:
path = "/" + path.split("://", 1)[1].split("/", 1)[1]
proxy_url = "%s%s" % (self.proxy.forward_url, path)
target_url = self.path
if "://" not in target_url:
target_url = "%s%s" % (self.proxy.forward_url, target_url)
data = self.data_bytes
forward_headers = CaseInsensitiveDict(self.headers)
# update original "Host" header (moto s3 relies on this behavior)
if not forward_headers.get("Host"):
forward_headers["host"] = urlparse(target_url).netloc
if "localhost.atlassian.io" in forward_headers.get("Host"):
forward_headers["host"] = "localhost"
try:
response = None
modified_request = None
# update listener (pre-invocation)
if self.proxy.update_listener:
listener_result = self.proxy.update_listener.forward_request(
method=method, path=path, data=data, headers=forward_headers
)
if isinstance(listener_result, Response):
response = listener_result
elif isinstance(listener_result, Request):
modified_request = listener_result
data = modified_request.data
forward_headers = modified_request.headers
elif listener_result is not True:
# get status code from response, or use Bad Gateway status code
code = listener_result if isinstance(listener_result, int) else 503
self.send_response(code)
self.end_headers()
return
# perform the actual invocation of the backend service
if response is None:
if modified_request:
response = self.method(
proxy_url,
data=modified_request.data,
headers=modified_request.headers,
)
else:
response = self.method(
proxy_url, data=self.data_bytes, headers=forward_headers
)
# update listener (post-invocation)
if self.proxy.update_listener:
kwargs = {
"method": method,
"path": path,
"data": data,
"headers": forward_headers,
"response": response,
}
if (
"request_handler"
in inspect.getargspec(self.proxy.update_listener.return_response)[0]
):
# some listeners (e.g., sqs_listener.py) require additional details like the original
# request port, hence we pass in a reference to this request handler as well.
kwargs["request_handler"] = self
updated_response = self.proxy.update_listener.return_response(**kwargs)
if isinstance(updated_response, Response):
response = updated_response
# copy headers and return response
self.send_response(response.status_code)
content_length_sent = False
for header_key, header_value in iteritems(response.headers):
# filter out certain headers that we don't want to transmit
if header_key not in ["Transfer-Encoding"]:
self.send_header(header_key, header_value)
content_length_sent = (
content_length_sent or header_key.lower() == "content-length"
)
if not content_length_sent:
self.send_header(
"Content-Length",
"%s" % len(response.content) if response.content else 0,
)
# allow pre-flight CORS headers by default
if "Access-Control-Allow-Origin" not in response.headers:
self.send_header("Access-Control-Allow-Origin", "*")
if "Access-Control-Allow-Methods" not in response.headers:
self.send_header(
"Access-Control-Allow-Methods", ",".join(CORS_ALLOWED_METHODS)
)
if "Access-Control-Allow-Headers" not in response.headers:
self.send_header(
"Access-Control-Allow-Headers", ",".join(CORS_ALLOWED_HEADERS)
)
self.end_headers()
if response.content and len(response.content):
self.wfile.write(to_bytes(response.content))
self.wfile.flush()
except Exception as e:
trace = str(traceback.format_exc())
conn_errors = ("ConnectionRefusedError", "NewConnectionError")
conn_error = any(e in trace for e in conn_errors)
error_msg = "Error forwarding request: %s %s" % (e, trace)
if "Broken pipe" in trace:
LOGGER.warn("Connection prematurely closed by client (broken pipe).")
elif not self.proxy.quiet or not conn_error:
LOGGER.error(error_msg)
if os.environ.get(ENV_INTERNAL_TEST_RUN):
# During a test run, we also want to print error messages, because
# log messages are delayed until the entire test run is over, and
# hence we are missing messages if the test hangs for some reason.
print("ERROR: %s" % error_msg)
self.send_response(502) # bad gateway
self.end_headers()
|
https://github.com/localstack/localstack/issues/639
|
localstack_1 | 2018-03-06 09:33:15,386 INFO spawned: 'infra' with pid 7848
localstack_1 | (. .venv/bin/activate; exec bin/localstack start)
localstack_1 | Starting local dev environment. CTRL-C to quit.
localstack_1 | Starting mock DynamoDB (https port 12000)...
localstack_1 | Starting mock S3 (https port 443)...
localstack_1 | Starting mock DynamoDB Streams service (https port 4570)...
localstack_1 | Initializing DynamoDB Local with the following configuration:
localstack_1 | Port: 4564
localstack_1 | InMemory: false
localstack_1 | DbPath: /tmp/localstack/data/dynamodb
localstack_1 | SharedDb: true
localstack_1 | shouldDelayTransientStatuses: false
localstack_1 | CorsParams: *
localstack_1 |
localstack_1 | 2018-03-06 09:33:17,346 INFO success: infra entered RUNNING state, process has stayed up for > than 1 seconds (startsecs)
localstack_1 | * Running on http://0.0.0.0:4563/ (Press CTRL+C to quit)
localstack_1 | 127.0.0.1 - - [06/Mar/2018 09:33:19] "GET / HTTP/1.1" 200 -
localstack_1 | Error starting infrastructure: HTTPSConnectionPool(host='afcc8dd10072', port=443): Max retries exceeded with url: /test (Caused by SSLError(SSLError("bad handshake: Error([('SSL routines', '$sl3_get_server_certificate', 'certificate verify failed')],)",),)) Traceback (most recent call last):
localstack_1 | File "/opt/code/localstack/localstack/services/infra.py", line 484, in start_infra
localstack_1 | restore_persisted_data(apis=apis)
localstack_1 | File "/opt/code/localstack/localstack/services/infra.py", line 219, in restore_persisted_data
localstack_1 | persistence.restore_persisted_data(api)
localstack_1 | File "/opt/code/localstack/localstack/utils/persistence.py", line 92, in restore_persisted_data
localstack_1 | return replay(api)
localstack_1 | File "/opt/code/localstack/localstack/utils/persistence.py", line 84, in replay
localstack_1 | replay_command(command)
localstack_1 | File "/opt/code/localstack/localstack/utils/persistence.py", line 68, in replay_command
localstack_1 | result = function(full_url, data=data, headers=command['h'])
localstack_1 | File "/opt/code/localstack/.venv/lib/python2.7/site-packages/requests/api.py", line 126, in put
localstack_1 | return request('put', url, data=data, **kwargs)
localstack_1 | File "/opt/code/localstack/.venv/lib/python2.7/site-packages/requests/api.py", line 58, in request
localstack_1 | return session.request(method=method, url=url, **kwargs)
localstack_1 | File "/opt/code/localstack/.venv/lib/python2.7/site-packages/requests/sessions.py", line 508, in request
localstack_1 | resp = self.send(prep, **send_kwargs)
localstack_1 | File "/opt/code/localstack/.venv/lib/python2.7/site-packages/requests/sessions.py", line 618, in send
localstack_1 | r = adapter.send(request, **kwargs)
localstack_1 | File "/opt/code/localstack/.venv/lib/python2.7/site-packages/requests/adapters.py", line 506, in send
localstack_1 | raise SSLError(e, request=request)
localstack_1 | SSLError: HTTPSConnectionPool(host='afcc8dd10072', port=443): Max retries exceeded with url: /test (Caused by SSLError(SSLError("bad handshake: Error([('SSL routines', 'ssl3_get_server_certi$icate', 'certificate verify failed')],)",),))
localstack_1 |
localstack_1 | 2018-03-06 09:33:19,925 INFO reaped unknown pid 7877
localstack_1 | 2018-03-06 09:33:19,927 INFO reaped unknown pid 7886
localstack_1 | ERROR: HTTPSConnectionPool(host='afcc8dd10072', port=443): Max retries exceeded with url: /test (Caused by SSLError(SSLError("bad handshake: Error([('SSL routines', 'ssl3_get_server_certifica
te', 'certificate verify failed')],)",),))
localstack_1 | 2018-03-06 09:33:21,953 INFO reaped unknown pid 7884
localstack_1 | 2018-03-06 09:33:21,956 INFO exited: infra (exit status 0; expected)
|
SSLError
|
def replay_command(command):
function = getattr(requests, command["m"].lower())
data = command["d"]
if data:
data = base64.b64decode(data)
endpoint = aws_stack.get_local_service_url(command["a"])
full_url = (endpoint[:-1] if endpoint.endswith("/") else endpoint) + command["p"]
result = function(full_url, data=data, headers=command["h"], verify=False)
return result
|
def replay_command(command):
function = getattr(requests, command["m"].lower())
data = command["d"]
if data:
data = base64.b64decode(data)
endpoint = aws_stack.get_local_service_url(command["a"])
full_url = (endpoint[:-1] if endpoint.endswith("/") else endpoint) + command["p"]
result = function(full_url, data=data, headers=command["h"])
return result
|
https://github.com/localstack/localstack/issues/639
|
localstack_1 | 2018-03-06 09:33:15,386 INFO spawned: 'infra' with pid 7848
localstack_1 | (. .venv/bin/activate; exec bin/localstack start)
localstack_1 | Starting local dev environment. CTRL-C to quit.
localstack_1 | Starting mock DynamoDB (https port 12000)...
localstack_1 | Starting mock S3 (https port 443)...
localstack_1 | Starting mock DynamoDB Streams service (https port 4570)...
localstack_1 | Initializing DynamoDB Local with the following configuration:
localstack_1 | Port: 4564
localstack_1 | InMemory: false
localstack_1 | DbPath: /tmp/localstack/data/dynamodb
localstack_1 | SharedDb: true
localstack_1 | shouldDelayTransientStatuses: false
localstack_1 | CorsParams: *
localstack_1 |
localstack_1 | 2018-03-06 09:33:17,346 INFO success: infra entered RUNNING state, process has stayed up for > than 1 seconds (startsecs)
localstack_1 | * Running on http://0.0.0.0:4563/ (Press CTRL+C to quit)
localstack_1 | 127.0.0.1 - - [06/Mar/2018 09:33:19] "GET / HTTP/1.1" 200 -
localstack_1 | Error starting infrastructure: HTTPSConnectionPool(host='afcc8dd10072', port=443): Max retries exceeded with url: /test (Caused by SSLError(SSLError("bad handshake: Error([('SSL routines', '$sl3_get_server_certificate', 'certificate verify failed')],)",),)) Traceback (most recent call last):
localstack_1 | File "/opt/code/localstack/localstack/services/infra.py", line 484, in start_infra
localstack_1 | restore_persisted_data(apis=apis)
localstack_1 | File "/opt/code/localstack/localstack/services/infra.py", line 219, in restore_persisted_data
localstack_1 | persistence.restore_persisted_data(api)
localstack_1 | File "/opt/code/localstack/localstack/utils/persistence.py", line 92, in restore_persisted_data
localstack_1 | return replay(api)
localstack_1 | File "/opt/code/localstack/localstack/utils/persistence.py", line 84, in replay
localstack_1 | replay_command(command)
localstack_1 | File "/opt/code/localstack/localstack/utils/persistence.py", line 68, in replay_command
localstack_1 | result = function(full_url, data=data, headers=command['h'])
localstack_1 | File "/opt/code/localstack/.venv/lib/python2.7/site-packages/requests/api.py", line 126, in put
localstack_1 | return request('put', url, data=data, **kwargs)
localstack_1 | File "/opt/code/localstack/.venv/lib/python2.7/site-packages/requests/api.py", line 58, in request
localstack_1 | return session.request(method=method, url=url, **kwargs)
localstack_1 | File "/opt/code/localstack/.venv/lib/python2.7/site-packages/requests/sessions.py", line 508, in request
localstack_1 | resp = self.send(prep, **send_kwargs)
localstack_1 | File "/opt/code/localstack/.venv/lib/python2.7/site-packages/requests/sessions.py", line 618, in send
localstack_1 | r = adapter.send(request, **kwargs)
localstack_1 | File "/opt/code/localstack/.venv/lib/python2.7/site-packages/requests/adapters.py", line 506, in send
localstack_1 | raise SSLError(e, request=request)
localstack_1 | SSLError: HTTPSConnectionPool(host='afcc8dd10072', port=443): Max retries exceeded with url: /test (Caused by SSLError(SSLError("bad handshake: Error([('SSL routines', 'ssl3_get_server_certi$icate', 'certificate verify failed')],)",),))
localstack_1 |
localstack_1 | 2018-03-06 09:33:19,925 INFO reaped unknown pid 7877
localstack_1 | 2018-03-06 09:33:19,927 INFO reaped unknown pid 7886
localstack_1 | ERROR: HTTPSConnectionPool(host='afcc8dd10072', port=443): Max retries exceeded with url: /test (Caused by SSLError(SSLError("bad handshake: Error([('SSL routines', 'ssl3_get_server_certifica
te', 'certificate verify failed')],)",),))
localstack_1 | 2018-03-06 09:33:21,953 INFO reaped unknown pid 7884
localstack_1 | 2018-03-06 09:33:21,956 INFO exited: infra (exit status 0; expected)
|
SSLError
|
def forward_request(self, method, path, data, headers):
if method == "POST" and path == "/":
req_data = urlparse.parse_qs(to_str(data))
req_action = req_data["Action"][0]
topic_arn = req_data.get("TargetArn") or req_data.get("TopicArn")
if topic_arn:
topic_arn = topic_arn[0]
do_create_topic(topic_arn)
if req_action == "SetSubscriptionAttributes":
sub = get_subscription_by_arn(req_data["SubscriptionArn"][0])
if not sub:
return make_error(
message="Unable to find subscription for given ARN", code=400
)
attr_name = req_data["AttributeName"][0]
attr_value = req_data["AttributeValue"][0]
sub[attr_name] = attr_value
return make_response(req_action)
elif req_action == "GetSubscriptionAttributes":
sub = get_subscription_by_arn(req_data["SubscriptionArn"][0])
if not sub:
return make_error(
message="Unable to find subscription for given ARN", code=400
)
content = "<Attributes>"
for key, value in sub.items():
content += "<entry><key>%s</key><value>%s</value></entry>\n" % (
key,
value,
)
content += "</Attributes>"
return make_response(req_action, content=content)
elif req_action == "Subscribe":
if "Endpoint" not in req_data:
return make_error(
message="Endpoint not specified in subscription", code=400
)
elif req_action == "Unsubscribe":
if "SubscriptionArn" not in req_data:
return make_error(
message="SubscriptionArn not specified in unsubscribe request",
code=400,
)
do_unsubscribe(req_data.get("SubscriptionArn")[0])
elif req_action == "Publish":
message = req_data["Message"][0]
sqs_client = aws_stack.connect_to_service("sqs")
for subscriber in SNS_SUBSCRIPTIONS[topic_arn]:
if subscriber["Protocol"] == "sqs":
endpoint = subscriber["Endpoint"]
if "sqs_queue_url" in subscriber:
queue_url = subscriber.get("sqs_queue_url")
elif "://" in endpoint:
queue_url = endpoint
else:
queue_name = endpoint.split(":")[5]
queue_url = aws_stack.get_sqs_queue_url(queue_name)
subscriber["sqs_queue_url"] = queue_url
sqs_client.send_message(
QueueUrl=queue_url,
MessageBody=create_sns_message_body(subscriber, req_data),
)
elif subscriber["Protocol"] == "lambda":
lambda_api.process_sns_notification(
subscriber["Endpoint"],
topic_arn,
message,
subject=req_data.get("Subject"),
)
elif subscriber["Protocol"] in ["http", "https"]:
requests.post(
subscriber["Endpoint"],
headers={
"Content-Type": "text/plain",
"x-amz-sns-message-type": "Notification",
},
data=create_sns_message_body(subscriber, req_data),
)
else:
LOGGER.warning(
'Unexpected protocol "%s" for SNS subscription'
% subscriber["Protocol"]
)
# return response here because we do not want the request to be forwarded to SNS
return make_response(req_action)
return True
|
def forward_request(self, method, path, data, headers):
if method == "POST" and path == "/":
req_data = urlparse.parse_qs(to_str(data))
req_action = req_data["Action"][0]
topic_arn = req_data.get("TargetArn") or req_data.get("TopicArn")
if topic_arn:
topic_arn = topic_arn[0]
do_create_topic(topic_arn)
if req_action == "SetSubscriptionAttributes":
sub = get_subscription_by_arn(req_data["SubscriptionArn"][0])
if not sub:
return make_error(
message="Unable to find subscription for given ARN", code=400
)
attr_name = req_data["AttributeName"][0]
attr_value = req_data["AttributeValue"][0]
sub[attr_name] = attr_value
return make_response(req_action)
elif req_action == "GetSubscriptionAttributes":
sub = get_subscription_by_arn(req_data["SubscriptionArn"][0])
if not sub:
return make_error(
message="Unable to find subscription for given ARN", code=400
)
content = "<Attributes>"
for key, value in sub.items():
content += "<entry><key>%s</key><value>%s</value></entry>\n" % (
key,
value,
)
content += "</Attributes>"
return make_response(req_action, content=content)
elif req_action == "Subscribe":
if "Endpoint" not in req_data:
return make_error(
message="Endpoint not specified in subscription", code=400
)
elif req_action == "Unsubscribe":
if "SubscriptionArn" not in req_data:
return make_error(
message="SubscriptionArn not specified in unsubscribe request",
code=400,
)
do_unsubscribe(req_data.get("SubscriptionArn")[0])
elif req_action == "Publish":
message = req_data["Message"][0]
sqs_client = aws_stack.connect_to_service("sqs")
for subscriber in SNS_SUBSCRIPTIONS[topic_arn]:
if subscriber["Protocol"] == "sqs":
queue_name = subscriber["Endpoint"].split(":")[5]
queue_url = subscriber.get("sqs_queue_url")
if not queue_url:
queue_url = aws_stack.get_sqs_queue_url(queue_name)
subscriber["sqs_queue_url"] = queue_url
sqs_client.send_message(
QueueUrl=queue_url,
MessageBody=create_sns_message_body(subscriber, req_data),
)
elif subscriber["Protocol"] == "lambda":
lambda_api.process_sns_notification(
subscriber["Endpoint"],
topic_arn,
message,
subject=req_data.get("Subject"),
)
elif subscriber["Protocol"] in ["http", "https"]:
requests.post(
subscriber["Endpoint"],
headers={
"Content-Type": "text/plain",
"x-amz-sns-message-type": "Notification",
},
data=create_sns_message_body(subscriber, req_data),
)
else:
LOGGER.warning(
'Unexpected protocol "%s" for SNS subscription'
% subscriber["Protocol"]
)
# return response here because we do not want the request to be forwarded to SNS
return make_response(req_action)
return True
|
https://github.com/localstack/localstack/issues/510
|
2017-12-13T18:10:44:ERROR:localstack.services.generic_proxy: Error forwarding request: list index out of range Traceback (most recent call last):
File "/Users/mpandit/work/localstack/localstack/services/generic_proxy.py", line 181, in forward
path=path, data=data, headers=forward_headers)
File "/Users/mpandit/work/localstack/localstack/services/sns/sns_listener.py", line 63, in forward_request
queue_name = subscriber['Endpoint'].split(':')[5]
IndexError: list index out of range
|
IndexError
|
def strip_chunk_signatures(data):
# For clients that use streaming v4 authentication, the request contains chunk signatures
# in the HTTP body (see example below) which we need to strip as moto cannot handle them
#
# 17;chunk-signature=6e162122ec4962bea0b18bc624025e6ae4e9322bdc632762d909e87793ac5921
# <payload data ...>
# 0;chunk-signature=927ab45acd82fc90a3c210ca7314d59fedc77ce0c914d79095f8cc9563cf2c70
data_new = re.sub(
b"(\r\n)?[0-9a-fA-F]+;chunk-signature=[0-9a-f]{64}(\r\n){,2}",
b"",
data,
flags=re.MULTILINE | re.DOTALL,
)
if data_new != data:
# trim \r (13) or \n (10)
for i in range(0, 2):
if len(data_new) and data_new[0] in (10, 13):
data_new = data_new[1:]
for i in range(0, 6):
if len(data_new) and data_new[-1] in (10, 13):
data_new = data_new[:-1]
return data_new
|
def strip_chunk_signatures(data):
# For clients that use streaming v4 authentication, the request contains chunk signatures
# in the HTTP body (see example below) which we need to strip as moto cannot handle them
#
# 17;chunk-signature=6e162122ec4962bea0b18bc624025e6ae4e9322bdc632762d909e87793ac5921
# <payload data ...>
# 0;chunk-signature=927ab45acd82fc90a3c210ca7314d59fedc77ce0c914d79095f8cc9563cf2c70
data_new = re.sub(
b"(\r\n)?[0-9a-fA-F]+;chunk-signature=[0-9a-f]{64}(\r\n){,2}",
b"",
data,
flags=re.MULTILINE | re.DOTALL,
)
if data_new != data:
# trim \r (13) or \n (10)
for i in range(0, 2):
if data_new[0] in (10, 13):
data_new = data_new[1:]
for i in range(0, 6):
if data_new[-1] in (10, 13):
data_new = data_new[:-1]
return data_new
|
https://github.com/localstack/localstack/issues/455
|
2017-11-10T12:42:51:ERROR:localstack.services.generic_proxy: Error forwarding request: string index out of range Traceback (most recent call last):
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 162, in forward
path=path, data=data, headers=forward_headers)
File "/opt/code/localstack/localstack/services/s3/s3_listener.py", line 383, in forward_request
modified_data = strip_chunk_signatures(data)
File "/opt/code/localstack/localstack/services/s3/s3_listener.py", line 252, in strip_chunk_signatures
if data_new[0] in (10, 13):
IndexError: string index out of range
|
IndexError
|
def send_notifications(method, bucket_name, object_path):
for bucket, config in iteritems(S3_NOTIFICATIONS):
if bucket == bucket_name:
action = {"PUT": "ObjectCreated", "DELETE": "ObjectRemoved"}[method]
# TODO: support more detailed methods, e.g., DeleteMarkerCreated
# http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
api_method = {"PUT": "Put", "DELETE": "Delete"}[method]
event_name = "%s:%s" % (action, api_method)
if event_type_matches(
config["Event"], action, api_method
) and filter_rules_match(config.get("Filter"), object_path):
# send notification
message = get_event_message(
event_name=event_name,
bucket_name=bucket_name,
file_name=urlparse.urlparse(object_path[1:]).path,
)
message = json.dumps(message)
if config.get("Queue"):
sqs_client = aws_stack.connect_to_service("sqs")
try:
queue_url = queue_url_for_arn(config["Queue"])
sqs_client.send_message(QueueUrl=queue_url, MessageBody=message)
except Exception as e:
LOGGER.warning(
'Unable to send notification for S3 bucket "%s" to SQS queue "%s": %s'
% (bucket_name, config["Queue"], e)
)
if config.get("Topic"):
sns_client = aws_stack.connect_to_service("sns")
try:
sns_client.publish(TopicArn=config["Topic"], Message=message)
except Exception as e:
LOGGER.warning(
'Unable to send notification for S3 bucket "%s" to SNS topic "%s".'
% (bucket_name, config["Topic"])
)
if config.get("CloudFunction"):
# make sure we don't run into a socket timeout
connection_config = botocore.config.Config(read_timeout=300)
lambda_client = aws_stack.connect_to_service(
"lambda", config=connection_config
)
try:
lambda_client.invoke(
FunctionName=config["CloudFunction"], Payload=message
)
except Exception as e:
LOGGER.warning(
'Unable to send notification for S3 bucket "%s" to Lambda function "%s".'
% (bucket_name, config["CloudFunction"])
)
if not filter(
lambda x: config.get(x), ("Queue", "Topic", "CloudFunction")
):
LOGGER.warning(
"Neither of Queue/Topic/CloudFunction defined for S3 notification."
)
|
def send_notifications(method, bucket_name, object_path):
for bucket, config in iteritems(S3_NOTIFICATIONS):
if bucket == bucket_name:
action = {"PUT": "ObjectCreated", "DELETE": "ObjectRemoved"}[method]
# TODO: support more detailed methods, e.g., DeleteMarkerCreated
# http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
api_method = {"PUT": "Put", "DELETE": "Delete"}[method]
event_name = "%s:%s" % (action, api_method)
if event_type_matches(
config["Event"], action, api_method
) and filter_rules_match(config.get("Filter"), object_path):
# send notification
message = get_event_message(
event_name=event_name,
bucket_name=bucket_name,
file_name=urlparse.urlparse(object_path[1:]).path,
)
message = json.dumps(message)
if config.get("Queue"):
sqs_client = aws_stack.connect_to_service("sqs")
try:
queue_url = queue_url_for_arn(config["Queue"])
sqs_client.send_message(QueueUrl=queue_url, MessageBody=message)
except Exception as e:
LOGGER.warning(
'Unable to send notification for S3 bucket "%s" to SQS queue "%s": %s'
% (bucket_name, config["Queue"], e)
)
if config.get("Topic"):
sns_client = aws_stack.connect_to_service("sns")
try:
sns_client.publish(TopicArn=config["Topic"], Message=message)
except Exception as e:
LOGGER.warning(
'Unable to send notification for S3 bucket "%s" to SNS topic "%s".'
% (bucket_name, config["Topic"])
)
if config.get("CloudFunction"):
# make sure we don't run into a socket timeout
config = botocore.config.Config(read_timeout=300)
lambda_client = aws_stack.connect_to_service(
"lambda", config=config
)
try:
lambda_client.invoke(
FunctionName=config["CloudFunction"], Payload=message
)
except Exception as e:
LOGGER.warning(
'Unable to send notification for S3 bucket "%s" to Lambda function "%s".'
% (bucket_name, config["CloudFunction"])
)
if not filter(
lambda x: config.get(x), ("Queue", "Topic", "CloudFunction")
):
LOGGER.warning(
"Neither of Queue/Topic/CloudFunction defined for S3 notification."
)
|
https://github.com/localstack/localstack/issues/462
|
2017-11-15T01:23:19:ERROR:localstack.services.generic_proxy: Error forwarding request: 'Config' object has no attribute '__getitem__' Traceback (most recent call last):
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 196, in forward
updated_response = self.proxy.update_listener.return_response(**kwargs)
File "/opt/code/localstack/localstack/services/s3/s3_listener.py", line 493, in return_response
send_notifications(method, bucket_name, object_path)
File "/opt/code/localstack/localstack/services/s3/s3_listener.py", line 160, in send_notifications
(bucket_name, config['CloudFunction']))
TypeError: 'Config' object has no attribute '__getitem__'
|
TypeError
|
def get_elasticsearch_domains(filter=".*", pool={}, env=None):
result = []
try:
out = cmd_es("list-domain-names", env)
out = json.loads(out)
def handle(domain):
domain = domain["DomainName"]
if re.match(filter, domain):
details = cmd_es(
"describe-elasticsearch-domain --domain-name %s" % domain, env
)
details = json.loads(details)["DomainStatus"]
arn = details["ARN"]
es = ElasticSearch(arn)
es.endpoint = details.get("Endpoint", "n/a")
result.append(es)
pool[arn] = es
parallelize(handle, out["DomainNames"])
except socket.error:
pass
return result
|
def get_elasticsearch_domains(filter=".*", pool={}, env=None):
result = []
try:
out = cmd_es("list-domain-names", env)
out = json.loads(out)
def handle(domain):
domain = domain["DomainName"]
if re.match(filter, domain):
details = cmd_es(
"describe-elasticsearch-domain --domain-name %s" % domain, env
)
details = json.loads(details)["DomainStatus"]
arn = details["ARN"]
es = ElasticSearch(arn)
es.endpoint = details["Endpoint"]
result.append(es)
pool[arn] = es
parallelize(handle, out["DomainNames"])
except socket.error:
pass
return result
|
https://github.com/localstack/localstack/issues/395
|
2017-10-11T05:49:47:INFO:werkzeug: 192.168.99.1 - - [11/Oct/2017 05:49:47] "GET / HTTP/1.1" 200 -
2017-10-11T05:49:47:INFO:werkzeug: 192.168.99.1 - - [11/Oct/2017 05:49:47] "GET //192.168.99.103:8080/swagger.json HTTP/1.1" 200 -
2017-10-11T05:49:48:INFO:werkzeug: 192.168.99.1 - - [11/Oct/2017 05:49:48] "GET /img/localstack_icon.png HTTP/1.1" 200 -
2017-10-11T05:49:49:INFO:werkzeug: 192.168.99.1 - - [11/Oct/2017 05:49:49] "POST /graph HTTP/1.1" 500 -
Traceback (most recent call last):
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 1836, in __call__
return self.wsgi_app(environ, start_response)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 1820, in wsgi_app
response = self.make_response(self.handle_exception(e))
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 1403, in handle_exception
reraise(exc_type, exc_value, tb)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 1817, in wsgi_app
response = self.full_dispatch_request()
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 1477, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 1381, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 1475, in full_dispatch_request
rv = self.dispatch_request()
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 1461, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/opt/code/localstack/localstack/dashboard/api.py", line 37, in get_graph
graph = infra.get_graph(name_filter=data['nameFilter'], env=env)
File "/opt/code/localstack/localstack/dashboard/infra.py", line 432, in get_graph
domains = get_elasticsearch_domains(name_filter, pool=pool, env=env)
File "/opt/code/localstack/localstack/dashboard/infra.py", line 312, in get_elasticsearch_domains
parallelize(handle, out['DomainNames'])
File "/opt/code/localstack/localstack/utils/common.py", line 698, in parallelize
result = pool.map(func, list)
File "/usr/lib/python2.7/multiprocessing/pool.py", line 251, in map
return self.map_async(func, iterable, chunksize).get()
File "/usr/lib/python2.7/multiprocessing/pool.py", line 567, in get
raise self._value
KeyError: 'Endpoint'
|
KeyError
|
def handle(domain):
domain = domain["DomainName"]
if re.match(filter, domain):
details = cmd_es("describe-elasticsearch-domain --domain-name %s" % domain, env)
details = json.loads(details)["DomainStatus"]
arn = details["ARN"]
es = ElasticSearch(arn)
es.endpoint = details.get("Endpoint", "n/a")
result.append(es)
pool[arn] = es
|
def handle(domain):
domain = domain["DomainName"]
if re.match(filter, domain):
details = cmd_es("describe-elasticsearch-domain --domain-name %s" % domain, env)
details = json.loads(details)["DomainStatus"]
arn = details["ARN"]
es = ElasticSearch(arn)
es.endpoint = details["Endpoint"]
result.append(es)
pool[arn] = es
|
https://github.com/localstack/localstack/issues/395
|
2017-10-11T05:49:47:INFO:werkzeug: 192.168.99.1 - - [11/Oct/2017 05:49:47] "GET / HTTP/1.1" 200 -
2017-10-11T05:49:47:INFO:werkzeug: 192.168.99.1 - - [11/Oct/2017 05:49:47] "GET //192.168.99.103:8080/swagger.json HTTP/1.1" 200 -
2017-10-11T05:49:48:INFO:werkzeug: 192.168.99.1 - - [11/Oct/2017 05:49:48] "GET /img/localstack_icon.png HTTP/1.1" 200 -
2017-10-11T05:49:49:INFO:werkzeug: 192.168.99.1 - - [11/Oct/2017 05:49:49] "POST /graph HTTP/1.1" 500 -
Traceback (most recent call last):
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 1836, in __call__
return self.wsgi_app(environ, start_response)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 1820, in wsgi_app
response = self.make_response(self.handle_exception(e))
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 1403, in handle_exception
reraise(exc_type, exc_value, tb)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 1817, in wsgi_app
response = self.full_dispatch_request()
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 1477, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 1381, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 1475, in full_dispatch_request
rv = self.dispatch_request()
File "/opt/code/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 1461, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/opt/code/localstack/localstack/dashboard/api.py", line 37, in get_graph
graph = infra.get_graph(name_filter=data['nameFilter'], env=env)
File "/opt/code/localstack/localstack/dashboard/infra.py", line 432, in get_graph
domains = get_elasticsearch_domains(name_filter, pool=pool, env=env)
File "/opt/code/localstack/localstack/dashboard/infra.py", line 312, in get_elasticsearch_domains
parallelize(handle, out['DomainNames'])
File "/opt/code/localstack/localstack/utils/common.py", line 698, in parallelize
result = pool.map(func, list)
File "/usr/lib/python2.7/multiprocessing/pool.py", line 251, in map
return self.map_async(func, iterable, chunksize).get()
File "/usr/lib/python2.7/multiprocessing/pool.py", line 567, in get
raise self._value
KeyError: 'Endpoint'
|
KeyError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.